// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <net/ipv6.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_tlv.h"

struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];
	unsigned int size;
	unsigned int offset;
};

#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}
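/* For example, FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype) below expands
 * to { .string = "rpc_unkn_etype",
 *      .size = sizeof_field(struct fbnic_hw_stats, rpc.unkn_etype),
 *      .offset = offsetof(struct fbnic_hw_stats, rpc.unkn_etype) },
 * which is all the stats dump code needs to read the counter generically.
 */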

/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
#define FBNIC_HW_STATS_LEN	FBNIC_HW_FIXED_STATS_LEN

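/* Only the firmware version string is reported here; the ethtool core is
 * expected to fill in the driver name and bus info from the parent PCI device.
 */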
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
				    sizeof(drvinfo->fw_version));
}

static int fbnic_get_regs_len(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
}

static void fbnic_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *data)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}

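/* Reconfiguration helpers: the ringparam and channel changes further down work
 * on a throwaway copy of the netdev private data. The clone carries the new
 * configuration so fresh rings and NAPI vectors can be allocated while the
 * original is still live, and the two are only swapped once nothing can fail.
 */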
static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
{
	struct fbnic_net *clone;

	clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
	if (!clone)
		return NULL;

	memset(clone->tx, 0, sizeof(clone->tx));
	memset(clone->rx, 0, sizeof(clone->rx));
	memset(clone->napi, 0, sizeof(clone->napi));
	return clone;
}

static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
				 struct fbnic_net *clone)
{
	swap(clone->rcq_size, orig->rcq_size);
	swap(clone->hpq_size, orig->hpq_size);
	swap(clone->ppq_size, orig->ppq_size);
	swap(clone->txq_size, orig->txq_size);
	swap(clone->num_rx_queues, orig->num_rx_queues);
	swap(clone->num_tx_queues, orig->num_tx_queues);
	swap(clone->num_napi, orig->num_napi);
}

static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
					    struct fbnic_napi_vector *nv)
{
	int i, j;

	for (i = 0; i < nv->txt_count; i++) {
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
	}

	for (j = 0; j < nv->rxt_count; j++, i++) {
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
	}
}

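/* Swap the live and cloned configurations. IRQs are quiesced and the per-ring
 * counters are folded into the netdev-level totals first, so statistics
 * survive the old rings being freed along with the clone.
 */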
static void fbnic_clone_swap(struct fbnic_net *orig,
			     struct fbnic_net *clone)
{
	struct fbnic_dev *fbd = orig->fbd;
	unsigned int i;

	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
	for (i = 0; i < orig->num_napi; i++)
		fbnic_aggregate_vector_counters(orig, orig->napi[i]);

	fbnic_clone_swap_cfg(orig, clone);

	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
		swap(clone->napi[i], orig->napi[i]);
	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
		swap(clone->tx[i], orig->tx[i]);
	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
		swap(clone->rx[i], orig->rx[i]);
}

static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}

static int fbnic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	ec->tx_coalesce_usecs = fbn->tx_usecs;
	ec->rx_coalesce_usecs = fbn->rx_usecs;
	ec->rx_max_coalesced_frames = fbn->rx_max_frames;

	return 0;
}

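/* Coalescing settings are bounded by the hardware timer and threshold fields
 * checked below. A typical adjustment would look something like:
 *	ethtool -C <ifname> tx-usecs 64 rx-usecs 64 rx-frames 32
 * (illustrative values, not recommendations).
 */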
static int fbnic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	/* Verify against hardware limits */
	if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
		NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
		return -EINVAL;
	}
	if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
		NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
		return -EINVAL;
	}
	if (ec->rx_max_coalesced_frames >
	    FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
	    FBNIC_MIN_RXD_PER_FRAME) {
		NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
		return -EINVAL;
	}

	fbn->tx_usecs = ec->tx_coalesce_usecs;
	fbn->rx_usecs = ec->rx_coalesce_usecs;
	fbn->rx_max_frames = ec->rx_max_coalesced_frames;

	if (netif_running(netdev)) {
		int i;

		for (i = 0; i < fbn->num_napi; i++) {
			struct fbnic_napi_vector *nv = fbn->napi[i];

			fbnic_config_txrx_usecs(nv, 0);
			fbnic_config_rx_frames(nv);
		}
	}

	return 0;
}

static void
fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;

	ring->rx_pending = fbn->rcq_size;
	ring->rx_mini_pending = fbn->hpq_size;
	ring->rx_jumbo_pending = fbn->ppq_size;
	ring->tx_pending = fbn->txq_size;
}

static void fbnic_set_rings(struct fbnic_net *fbn,
			    struct ethtool_ringparam *ring)
{
	fbn->rcq_size = ring->rx_pending;
	fbn->hpq_size = ring->rx_mini_pending;
	fbn->ppq_size = ring->rx_jumbo_pending;
	fbn->txq_size = ring->tx_pending;
}

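/* Ring sizes are rounded up to the next power of two before validation, so the
 * applied value may be larger than requested. As the getter above suggests,
 * "rx" maps to the Rx completion queue (RCQ), "rx-mini"/"rx-jumbo" to the
 * header (HPQ) and payload (PPQ) buffer queues, and "tx" to the transmit
 * queue; e.g. something like:
 *	ethtool -G <ifname> rx 1024 tx 1024
 * would resize the completion and transmit rings (illustrative values).
 */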
static int
fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_net *clone;
	int err;

	ring->rx_pending	= roundup_pow_of_two(ring->rx_pending);
	ring->rx_mini_pending	= roundup_pow_of_two(ring->rx_mini_pending);
	ring->rx_jumbo_pending	= roundup_pow_of_two(ring->rx_jumbo_pending);
	ring->tx_pending	= roundup_pow_of_two(ring->tx_pending);

	/* These are the absolute minimums that allow the device and driver to
	 * operate, but they do not guarantee reasonable performance. An Rx
	 * queue size below 128 or BDQs smaller than 64 entries are likely
	 * suboptimal at best.
	 */
	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
		return -EINVAL;
	}

	if (!netif_running(netdev)) {
		fbnic_set_rings(fbn, ring);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_rings(clone, ring);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	fbnic_up(fbn);

	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}

static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
			ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
		break;
	}
}

static void fbnic_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	const struct fbnic_stat *stat;
	int i;

	fbnic_get_hw_stats(fbn->fbd);

	for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
		stat = &fbnic_gstrings_hw_stats[i];
		data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
	}
}

static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return FBNIC_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

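/* Map an ethtool flow type onto the driver's RSS hash option table index.
 * L4 flow types get their own entries, the various IPv4/IPv6-only types
 * collapse onto the per-IP-version options, and anything unrecognized
 * returns -1 so callers can reject the request.
 */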
static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
		return FBNIC_IPV4_HASH_OPT;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	}

	return -1;
}

static int
fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);

	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Report options from rss_en table in fbn */
	cmd->data = fbn->rss_flow_hash[hash_opt_idx];

	return 0;
}

static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int i, cnt = 0;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
		struct fbnic_act_tcam *act_tcam;

		act_tcam = &fbd->act_tcam[idx];
		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		if (rule_locs) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;

			rule_locs[cnt] = i;
		}

		cnt++;
	}

	return cnt;
}

static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}

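/* ETHTOOL_GRXCLSRLCNT and ETHTOOL_GRXCLSRLALL share fbnic_get_cls_rule_all():
 * the count query simply passes a NULL rule_locs array and advertises
 * RX_CLS_LOC_SPECIAL so user space (e.g. "ethtool -n <ifname>") knows the
 * driver picks rule locations itself.
 */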
static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = fbn->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fbnic_get_rss_hash_opts(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}

#define FBNIC_L2_HASH_OPTIONS \
	(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
#define FBNIC_L4_HASH_OPTIONS \
	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)

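/* Each hash option level above is a superset of the previous one, which is
 * what the validation below relies on: an IP-only flow type may not request
 * L4 port hashing, and an L2-only type may not request IP hashing.  A typical
 * request might look something like:
 *	ethtool -N <ifname> rx-flow-hash udp4 sdfn
 * (illustrative; hashes on src/dst IP and src/dst port for UDP over IPv4).
 */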
static int
fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
{
	int hash_opt_idx;

	/* Verify the type requested is correct */
	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Verify the fields asked for can actually be assigned based on type */
	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
		return -EINVAL;

	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;

	if (netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	return 0;
}

static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
{
	int i;

	for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;

		if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
			return i;
	}

	return -ENOSPC;
}

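/* Insert an n-tuple steering rule.  Only RX_CLS_LOC_ANY is accepted, i.e. the
 * driver always picks the slot itself, and existing entries are never
 * overwritten.  A rule steering HTTP traffic to queue 2 could be added with
 * something along the lines of:
 *	ethtool -N <ifname> flow-type tcp4 dst-port 80 action 2
 * with the location left for the driver to choose.
 */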
static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
	u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
	u16 misc = 0, misc_mask = ~0;
	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
			      FBNIC_RPC_ACT_TBL0_DEST_HOST);
	struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
	struct fbnic_mac_addr *mac_addr = NULL;
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	struct in6_addr *addr6, *mask6;
	struct in_addr *addr4, *mask4;
	int hash_idx, location;
	u32 flow_type;
	int idx, j;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location != RX_CLS_LOC_ANY)
		return -EINVAL;
	location = fbnic_cls_rule_any_loc(fbd);
	if (location < 0)
		return location;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest = FBNIC_RPC_ACT_TBL0_DROP;
	} else if (fsp->flow_type & FLOW_RSS) {
		if (cmd->rss_context == 1)
			dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
	} else {
		u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);

		if (ring_idx >= fbn->num_rx_queues)
			return -EINVAL;

		dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
			FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
	}

	idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	/* Do not allow overwriting for now.
	 * To support overwriting rules we will need to add logic to free
	 * any IP or MACDA TCAMs that may be associated with the old rule.
	 */
	if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
		return -EBUSY;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
	hash_idx = fbnic_get_rss_hash_idx(flow_type);

	switch (flow_type) {
	case UDP_V4_FLOW:
udp4_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
		fallthrough;
	case TCP_V4_FLOW:
tcp4_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
			       FBNIC_RPC_TCAM_ACT1_L4_VALID);

		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
		goto ip4_flow;
	case IP_USER_FLOW:
		if (!fsp->m_u.usr_ip4_spec.proto)
			goto ip4_flow;
		if (fsp->m_u.usr_ip4_spec.proto != 0xff)
			return -EINVAL;
		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
			goto udp4_flow;
		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
			goto tcp4_flow;
		return -EINVAL;
ip4_flow:
		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
		if (mask4->s_addr) {
			ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
						  addr4, mask4);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					       ip_src - fbd->ip_src);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
		}

		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
		if (mask4->s_addr) {
			ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
						  addr4, mask4);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					       ip_dst - fbd->ip_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
		}
		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
		break;
	case UDP_V6_FLOW:
udp6_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
		fallthrough;
	case TCP_V6_FLOW:
tcp6_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
			       FBNIC_RPC_TCAM_ACT1_L4_VALID);

		sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
		goto ipv6_flow;
	case IPV6_USER_FLOW:
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			goto ipv6_flow;

		if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
			return -EINVAL;
		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
			goto udp6_flow;
		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
			goto tcp6_flow;
		if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
			return -EINVAL;

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
		if (!ipv6_addr_any(mask6)) {
			ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
						  addr6, mask6);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |=
				FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					   ip_src - fbd->ipo_src);
			ip_mask &=
				~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
				  FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
		}

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
		if (!ipv6_addr_any(mask6)) {
			ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
						  addr6, mask6);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |=
				FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					   ip_dst - fbd->ipo_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
ipv6_flow:
		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
		if (!ip_src && !ipv6_addr_any(mask6)) {
			ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
						  addr6, mask6);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					       ip_src - fbd->ip_src);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
		}

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
		if (!ip_dst && !ipv6_addr_any(mask6)) {
			ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
						  addr6, mask6);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					       ip_dst - fbd->ip_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			      FBNIC_RPC_TCAM_ACT1_IP_VALID |
			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
		break;
	case ETHER_FLOW:
		if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
			u8 *addr = fsp->h_u.ether_spec.h_dest;
			u8 *mask = fsp->m_u.ether_spec.h_dest;

			/* Do not allow MAC addr of 0 */
			if (is_zero_ether_addr(addr))
				return -EINVAL;

			/* Only support full MAC address to avoid
			 * conflicts with other MAC addresses.
			 */
			if (!is_broadcast_ether_addr(mask))
				return -EINVAL;

			if (is_multicast_ether_addr(addr))
				mac_addr = __fbnic_mc_sync(fbd, addr);
			else
				mac_addr = __fbnic_uc_sync(fbd, addr);

			if (!mac_addr)
				return -ENOSPC;

			set_bit(idx, mac_addr->act_tcam);
			flow_value |=
				FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					   mac_addr - fbd->mac_addr);
			flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		break;
	default:
		return -EINVAL;
	}

	/* Write action table values */
	act_tcam->dest = dest;
	act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);

	/* Write IP Match value/mask to action_tcam[0] */
	act_tcam->value.tcam[0] = ip_value;
	act_tcam->mask.tcam[0] = ip_mask;

	/* Write flow type value/mask to action_tcam[1] */
	act_tcam->value.tcam[1] = flow_value;
	act_tcam->mask.tcam[1] = flow_mask;

	/* Write error, DSCP, extra L4 matches to action_tcam[2] */
	act_tcam->value.tcam[2] = misc;
	act_tcam->mask.tcam[2] = misc_mask;

	/* Write source/destination port values */
	act_tcam->value.tcam[3] = sport;
	act_tcam->mask.tcam[3] = sport_mask;
	act_tcam->value.tcam[4] = dport;
	act_tcam->mask.tcam[4] = dport_mask;

	for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
		act_tcam->mask.tcam[j] = 0xffff;

	act_tcam->state = FBNIC_TCAM_S_UPDATE;
	fsp->location = location;

	if (netif_running(fbn->netdev)) {
		fbnic_write_rules(fbd);
		if (ip_src || ip_dst)
			fbnic_write_ip_addr(fbd);
		if (mac_addr)
			fbnic_write_macda(fbd);
	}

	return 0;
}

static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
				  unsigned int tcam_idx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
		__fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);

	/* Write updates to hardware */
	if (netif_running(fbn->netdev))
		fbnic_write_macda(fbd);
}

static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
				    unsigned int tcam_idx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
		__fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
		__fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
		__fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
		__fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);

	/* Write updates to hardware */
	if (netif_running(fbn->netdev))
		fbnic_write_ip_addr(fbd);
}

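/* Removing a rule (e.g. "ethtool -N <ifname> delete <loc>") also has to drop
 * any references the rule holds on shared MACDA/IP address TCAM entries so
 * those can be reclaimed; the helpers above handle that bookkeeping.
 */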
static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	act_tcam->state = FBNIC_TCAM_S_DELETE;

	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
		fbnic_clear_nfc_macda(fbn, idx);

	if ((act_tcam->value.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
	    (~act_tcam->mask.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
		fbnic_clear_nfc_ip_addr(fbn, idx);

	if (netif_running(fbn->netdev))
		fbnic_write_rules(fbd);

	return 0;
}

static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fbnic_set_rss_hash_opts(fbn, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = fbnic_set_cls_rule_ins(fbn, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = fbnic_set_cls_rule_del(fbn, cmd);
		break;
	}

	return ret;
}

static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
}

static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_TBL_SIZE;
}

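/* The RSS key is kept in fbn->rss_key as u32 words packed most-significant
 * byte first, so the get/set paths below repack it byte by byte for the
 * ethtool API (e.g. "ethtool -x <ifname>" to dump it, or
 * "ethtool -X <ifname> equal 4" to spread the default indirection table
 * over four queues).
 */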
static int
fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key) {
		for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
			u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);

			rxfh->key[i] = rss_key >> 24;
		}
	}

	if (rxfh->indir) {
		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
			rxfh->indir[i] = fbn->indir_tbl[0][i];
	}

	return 0;
}

static unsigned int
fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
{
	unsigned int i, changes = 0;

	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
		if (fbn->indir_tbl[idx][i] == indir[i])
			continue;

		fbn->indir_tbl[idx][i] = indir[i];
		changes++;
	}

	return changes;
}

static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}

static int
fbnic_modify_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx,
			  const struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	const u32 *indir = rxfh->indir;
	unsigned int changes;

	if (!indir)
		indir = ethtool_rxfh_context_indir(ctx);

	changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}

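/* Additional RSS contexts use statically allocated indirection tables (up to
 * rxfh_max_num_contexts), so creating one only has to validate the hash
 * function and seed a default table; e.g. newer ethtool can request one with
 * something like "ethtool -X <ifname> context new".
 */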
static int
fbnic_create_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx,
			  const struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
		return -EOPNOTSUPP;
	}
	ctx->hfunc = ETH_RSS_HASH_TOP;

	if (!rxfh->indir) {
		u32 *indir = ethtool_rxfh_context_indir(ctx);
		unsigned int num_rx = fbn->num_rx_queues;
		unsigned int i;

		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
			indir[i] = ethtool_rxfh_indir_default(i, num_rx);
	}

	return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
}

static int
fbnic_remove_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx, u32 rss_context,
			  struct netlink_ext_ack *extack)
{
	/* Nothing to do, contexts are allocated statically */
	return 0;
}

static void fbnic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	ch->max_rx = fbd->max_num_queues;
	ch->max_tx = fbd->max_num_queues;
	ch->max_combined = min(ch->max_rx, ch->max_tx);
	ch->max_other = FBNIC_NON_NAPI_VECTORS;

	if (fbn->num_rx_queues > fbn->num_napi ||
	    fbn->num_tx_queues > fbn->num_napi)
		ch->combined_count = min(fbn->num_rx_queues,
					 fbn->num_tx_queues);
	else
		ch->combined_count =
			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
	ch->other_count = FBNIC_NON_NAPI_VECTORS;
}

static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
			     unsigned int max_napis)
{
	fbn->num_rx_queues = ch->rx_count + ch->combined_count;
	fbn->num_tx_queues = ch->tx_count + ch->combined_count;
	fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
			    max_napis);
}

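/* Changing the channel count (e.g. "ethtool -L <ifname> combined 8") uses the
 * same clone-and-swap approach as fbnic_set_ringparam(), and additionally
 * resets the RSS indirection table since the number of Rx queues may change.
 */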
static int fbnic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int max_napis, standalone;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_net *clone;
	int err;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
	standalone = ch->rx_count + ch->tx_count;

	/* Limits for standalone queues:
	 *  - each queue has its own NAPI (num_napi >= rx + tx + combined)
	 *  - when combining queues (combined != 0), rx or tx must be 0
	 */
	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
	    (standalone && standalone + ch->combined_count > max_napis) ||
	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
		return -EINVAL;

	if (!netif_running(netdev)) {
		fbnic_set_queues(fbn, ch, max_napis);
		fbnic_reset_indir_tbl(fbn);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_queues(clone, ch, max_napis);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	/* Reset RSS indirection table */
	fbnic_reset_indir_tbl(fbn);

	fbnic_up(fbn);

	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}

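/* Timestamping capabilities reported here are what "ethtool -T <ifname>"
 * shows: software and hardware Tx timestamps, hardware Rx timestamps, and a
 * set of PTP Rx filters up to HWTSTAMP_FILTER_ALL.
 */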
static int
fbnic_get_ts_info(struct net_device *netdev,
		  struct kernel_ethtool_ts_info *tsinfo)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);

	tsinfo->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	tsinfo->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	tsinfo->rx_filters =
		BIT(HWTSTAMP_FILTER_NONE) |
		BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
		BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static void fbnic_get_ts_stats(struct net_device *netdev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u64 ts_packets, ts_lost;
	struct fbnic_ring *ring;
	unsigned int start;
	int i;

	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
	for (i = 0; i < fbn->num_tx_queues; i++) {
		ring = fbn->tx[i];
		do {
			start = u64_stats_fetch_begin(&ring->stats.syncp);
			ts_packets = ring->stats.twq.ts_packets;
			ts_lost = ring->stats.twq.ts_lost;
		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
		ts_stats->pkts += ts_packets;
		ts_stats->lost += ts_lost;
	}
}

static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
{
	if (counter->reported)
		*stat = counter->value;
}

static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
			struct ethtool_eth_mac_stats *eth_mac_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	const struct fbnic_mac *mac;

	mac_stats = &fbd->hw_stats.mac;
	mac = fbd->mac;

	mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);

	fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
			  &mac_stats->eth_mac.FramesTransmittedOK);
	fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
			  &mac_stats->eth_mac.FramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
			  &mac_stats->eth_mac.FrameCheckSequenceErrors);
	fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
			  &mac_stats->eth_mac.AlignmentErrors);
	fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
			  &mac_stats->eth_mac.OctetsTransmittedOK);
	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
			  &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
	fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
			  &mac_stats->eth_mac.OctetsReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
			  &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
	fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
			  &mac_stats->eth_mac.MulticastFramesXmittedOK);
	fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
			  &mac_stats->eth_mac.BroadcastFramesXmittedOK);
	fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
			  &mac_stats->eth_mac.MulticastFramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
			  &mac_stats->eth_mac.BroadcastFramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
			  &mac_stats->eth_mac.FrameTooLongErrors);
}

static const struct ethtool_ops fbnic_ethtool_ops = {
	.supported_coalesce_params	=
				  ETHTOOL_COALESCE_USECS |
				  ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.rxfh_max_num_contexts	= FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo		= fbnic_get_drvinfo,
	.get_regs_len		= fbnic_get_regs_len,
	.get_regs		= fbnic_get_regs,
	.get_coalesce		= fbnic_get_coalesce,
	.set_coalesce		= fbnic_set_coalesce,
	.get_ringparam		= fbnic_get_ringparam,
	.set_ringparam		= fbnic_set_ringparam,
	.get_strings		= fbnic_get_strings,
	.get_ethtool_stats	= fbnic_get_ethtool_stats,
	.get_sset_count		= fbnic_get_sset_count,
	.get_rxnfc		= fbnic_get_rxnfc,
	.set_rxnfc		= fbnic_set_rxnfc,
	.get_rxfh_key_size	= fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size	= fbnic_get_rxfh_indir_size,
	.get_rxfh		= fbnic_get_rxfh,
	.set_rxfh		= fbnic_set_rxfh,
	.create_rxfh_context	= fbnic_create_rxfh_context,
	.modify_rxfh_context	= fbnic_modify_rxfh_context,
	.remove_rxfh_context	= fbnic_remove_rxfh_context,
	.get_channels		= fbnic_get_channels,
	.set_channels		= fbnic_set_channels,
	.get_ts_info		= fbnic_get_ts_info,
	.get_ts_stats		= fbnic_get_ts_stats,
	.get_eth_mac_stats	= fbnic_get_eth_mac_stats,
};

void fbnic_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fbnic_ethtool_ops;
}