1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/ethtool.h>
5 #include <linux/netdevice.h>
6 #include <linux/pci.h>
7 #include <net/ipv6.h>
8 
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_tlv.h"
12 
/* Describes one exported statistic: the ethtool name string plus the
 * size and byte offset of the counter within its owning stats struct.
 * Note: size is not consulted by the readers in this file; values are
 * read as u64 in fbnic_report_hw_stats().
 */
struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];
	unsigned int size;
	unsigned int offset;
};
18 
/* Build a struct fbnic_stat entry for member @stat of struct @type,
 * reported under the ethtool string @name.
 */
#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}
24 
/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)

/* Fixed (non-per-instance) counters read from struct fbnic_hw_stats.
 * Table order defines the order in which both names (fbnic_get_strings)
 * and values (fbnic_get_ethtool_stats) are emitted.
 */
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* TTI */
	FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
	FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
	FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
	FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
	FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
	FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),

	/* TMI */
	FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
	FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
	FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),

	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

/* Number of entries in the fixed table above */
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
55 
#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)

/* Per-enqueue-index RXB counters.  The "%u" in each name is replaced
 * with the enqueue index by ethtool_sprintf() in
 * fbnic_get_rxb_enqueue_strings().
 */
static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
	FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),

	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
};

#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
71 
#define FBNIC_RXB_FIFO_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)

/* Per-FIFO RXB counters; "%u" is the FIFO index, filled in by
 * fbnic_get_rxb_fifo_strings().
 */
static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
};

#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
83 
#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)

/* Per-dequeue-index RXB counters; "%u" is the dequeue index, filled in
 * by fbnic_get_rxb_dequeue_strings().
 */
static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
};

#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
96 
/* Per-queue counters; the ".value" member of each stat is what gets
 * reported.
 */
#define FBNIC_HW_Q_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)

static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
	FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
	FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
	FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
};

#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
/* Total ethtool stats count: the fixed table plus each per-instance
 * table multiplied by its instance count.  Must stay in sync with what
 * fbnic_get_strings()/fbnic_get_ethtool_stats() emit.
 */
#define FBNIC_HW_STATS_LEN \
	(FBNIC_HW_FIXED_STATS_LEN + \
	 FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
	 FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
	 FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
	 FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
113 
114 static void
115 fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
116 {
117 	struct fbnic_net *fbn = netdev_priv(netdev);
118 	struct fbnic_dev *fbd = fbn->fbd;
119 
120 	fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
121 				    sizeof(drvinfo->fw_version));
122 }
123 
124 static int fbnic_get_regs_len(struct net_device *netdev)
125 {
126 	struct fbnic_net *fbn = netdev_priv(netdev);
127 
128 	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
129 }
130 
131 static void fbnic_get_regs(struct net_device *netdev,
132 			   struct ethtool_regs *regs, void *data)
133 {
134 	struct fbnic_net *fbn = netdev_priv(netdev);
135 
136 	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
137 }
138 
139 static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
140 {
141 	struct fbnic_net *clone;
142 
143 	clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
144 	if (!clone)
145 		return NULL;
146 
147 	memset(clone->tx, 0, sizeof(clone->tx));
148 	memset(clone->rx, 0, sizeof(clone->rx));
149 	memset(clone->napi, 0, sizeof(clone->napi));
150 	return clone;
151 }
152 
153 static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
154 				 struct fbnic_net *clone)
155 {
156 	swap(clone->rcq_size, orig->rcq_size);
157 	swap(clone->hpq_size, orig->hpq_size);
158 	swap(clone->ppq_size, orig->ppq_size);
159 	swap(clone->txq_size, orig->txq_size);
160 	swap(clone->num_rx_queues, orig->num_rx_queues);
161 	swap(clone->num_tx_queues, orig->num_tx_queues);
162 	swap(clone->num_napi, orig->num_napi);
163 }
164 
/* Fold one NAPI vector's ring counters into the netdev-level totals in
 * @fbn.  The vector's qt[] array holds txt_count Tx triads
 * (sub0/sub1/cmpl) followed by rxt_count Rx triads, which is why @i
 * keeps advancing across both loops.
 */
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
					    struct fbnic_napi_vector *nv)
{
	int i, j;

	for (i = 0; i < nv->txt_count; i++) {
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
	}

	/* Rx triads start right after the Tx ones; i carries over */
	for (j = 0; j < nv->rxt_count; j++, i++) {
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
	}
}
182 
/* Swap the live ring/NAPI state between @orig and its clone.
 *
 * IRQs for the union of both vector counts are synchronized first, and
 * the outgoing vectors' counters are folded into @orig so they are not
 * lost when the vector pointers are exchanged.
 */
static void fbnic_clone_swap(struct fbnic_net *orig,
			     struct fbnic_net *clone)
{
	struct fbnic_dev *fbd = orig->fbd;
	unsigned int i;

	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
	for (i = 0; i < orig->num_napi; i++)
		fbnic_aggregate_vector_counters(orig, orig->napi[i]);

	/* Exchange sizes/counts, then the per-ring pointer arrays */
	fbnic_clone_swap_cfg(orig, clone);

	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
		swap(clone->napi[i], orig->napi[i]);
	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
		swap(clone->tx[i], orig->tx[i]);
	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
		swap(clone->rx[i], orig->rx[i]);
}
203 
/* Free a clone created by fbnic_clone_create().  Only the top-level
 * structure is freed; anything it still points at must have been
 * released or swapped away by the caller.
 */
static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}
208 
209 static int fbnic_get_coalesce(struct net_device *netdev,
210 			      struct ethtool_coalesce *ec,
211 			      struct kernel_ethtool_coalesce *kernel_coal,
212 			      struct netlink_ext_ack *extack)
213 {
214 	struct fbnic_net *fbn = netdev_priv(netdev);
215 
216 	ec->tx_coalesce_usecs = fbn->tx_usecs;
217 	ec->rx_coalesce_usecs = fbn->rx_usecs;
218 	ec->rx_max_coalesced_frames = fbn->rx_max_frames;
219 
220 	return 0;
221 }
222 
223 static int fbnic_set_coalesce(struct net_device *netdev,
224 			      struct ethtool_coalesce *ec,
225 			      struct kernel_ethtool_coalesce *kernel_coal,
226 			      struct netlink_ext_ack *extack)
227 {
228 	struct fbnic_net *fbn = netdev_priv(netdev);
229 
230 	/* Verify against hardware limits */
231 	if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
232 		NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
233 		return -EINVAL;
234 	}
235 	if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
236 		NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
237 		return -EINVAL;
238 	}
239 	if (ec->rx_max_coalesced_frames >
240 	    FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
241 	    FBNIC_MIN_RXD_PER_FRAME) {
242 		NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
243 		return -EINVAL;
244 	}
245 
246 	fbn->tx_usecs = ec->tx_coalesce_usecs;
247 	fbn->rx_usecs = ec->rx_coalesce_usecs;
248 	fbn->rx_max_frames = ec->rx_max_coalesced_frames;
249 
250 	if (netif_running(netdev)) {
251 		int i;
252 
253 		for (i = 0; i < fbn->num_napi; i++) {
254 			struct fbnic_napi_vector *nv = fbn->napi[i];
255 
256 			fbnic_config_txrx_usecs(nv, 0);
257 			fbnic_config_rx_frames(nv);
258 		}
259 	}
260 
261 	return 0;
262 }
263 
264 static void
265 fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
266 		    struct kernel_ethtool_ringparam *kernel_ring,
267 		    struct netlink_ext_ack *extack)
268 {
269 	struct fbnic_net *fbn = netdev_priv(netdev);
270 
271 	ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
272 	ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
273 	ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
274 	ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
275 
276 	ring->rx_pending = fbn->rcq_size;
277 	ring->rx_mini_pending = fbn->hpq_size;
278 	ring->rx_jumbo_pending = fbn->ppq_size;
279 	ring->tx_pending = fbn->txq_size;
280 }
281 
282 static void fbnic_set_rings(struct fbnic_net *fbn,
283 			    struct ethtool_ringparam *ring)
284 {
285 	fbn->rcq_size = ring->rx_pending;
286 	fbn->hpq_size = ring->rx_mini_pending;
287 	fbn->ppq_size = ring->rx_jumbo_pending;
288 	fbn->txq_size = ring->tx_pending;
289 }
290 
/* ethtool -G: resize the rings.  If the interface is up, the new rings
 * are allocated on a clone of the private struct first so the current
 * configuration survives an allocation failure, then swapped in while
 * the device is quiesced.
 */
static int
fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)

{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_net *clone;
	int err;

	/* Ring sizes are used as power-of-two values; round requests up */
	ring->rx_pending	= roundup_pow_of_two(ring->rx_pending);
	ring->rx_mini_pending	= roundup_pow_of_two(ring->rx_mini_pending);
	ring->rx_jumbo_pending	= roundup_pow_of_two(ring->rx_jumbo_pending);
	ring->tx_pending	= roundup_pow_of_two(ring->tx_pending);

	/* These are absolute minimums allowing the device and driver to operate
	 * but not necessarily guarantee reasonable performance. Settings below
	 * Rx queue size of 128 and BDQs smaller than 64 are likely suboptimal
	 * at best.
	 */
	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
		return -EINVAL;
	}

	/* Interface down: just record the sizes for the next open */
	if (!netif_running(netdev)) {
		fbnic_set_rings(fbn, ring);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_rings(clone, ring);

	/* Allocate replacement vectors/rings on the clone before touching
	 * the running configuration.
	 */
	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	/* Quiesce the device before swapping in the new rings */
	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	/* New rings move onto fbn; the old ones end up on the clone */
	fbnic_clone_swap(fbn, clone);

	fbnic_up(fbn);

	/* Release the old rings now held by the clone */
	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Restart with the original rings still in place */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
370 
371 static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
372 {
373 	const struct fbnic_stat *stat;
374 	int i;
375 
376 	stat = fbnic_gstrings_rxb_enqueue_stats;
377 	for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
378 		ethtool_sprintf(data, stat->string, idx);
379 }
380 
381 static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
382 {
383 	const struct fbnic_stat *stat;
384 	int i;
385 
386 	stat = fbnic_gstrings_rxb_fifo_stats;
387 	for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
388 		ethtool_sprintf(data, stat->string, idx);
389 }
390 
391 static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
392 {
393 	const struct fbnic_stat *stat;
394 	int i;
395 
396 	stat = fbnic_gstrings_rxb_dequeue_stats;
397 	for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
398 		ethtool_sprintf(data, stat->string, idx);
399 }
400 
401 static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
402 {
403 	const struct fbnic_stat *stat;
404 	int i, idx;
405 
406 	switch (sset) {
407 	case ETH_SS_STATS:
408 		for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
409 			ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
410 
411 		for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
412 			fbnic_get_rxb_enqueue_strings(&data, i);
413 
414 		for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
415 			fbnic_get_rxb_fifo_strings(&data, i);
416 
417 		for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
418 			fbnic_get_rxb_dequeue_strings(&data, i);
419 
420 		for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
421 			stat = fbnic_gstrings_hw_q_stats;
422 
423 			for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
424 				ethtool_sprintf(&data, stat->string, idx);
425 		}
426 		break;
427 	}
428 }
429 
430 static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
431 				  const void *base, int len, u64 **data)
432 {
433 	while (len--) {
434 		u8 *curr = (u8 *)base + stat->offset;
435 
436 		**data = *(u64 *)curr;
437 
438 		stat++;
439 		(*data)++;
440 	}
441 }
442 
/* ethtool -S: refresh the hardware counters and copy them into @data in
 * the same order fbnic_get_strings() emits the names.  hw_stats_lock is
 * held while copying so the snapshot stays consistent.
 */
static void fbnic_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	fbnic_get_hw_stats(fbn->fbd);

	spin_lock(&fbd->hw_stats_lock);
	/* Fixed counters first */
	fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
			      FBNIC_HW_FIXED_STATS_LEN, &data);

	/* Then each per-instance RXB table, one instance at a time */
	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
		const struct fbnic_rxb_enqueue_stats *enq;

		enq = &fbd->hw_stats.rxb.enq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
				      enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
		const struct fbnic_rxb_fifo_stats *fifo;

		fifo = &fbd->hw_stats.rxb.fifo[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
				      fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
		const struct fbnic_rxb_dequeue_stats *deq;

		deq = &fbd->hw_stats.rxb.deq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
				      deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
				      &data);
	}

	/* Finally the per-queue counters */
	for (i  = 0; i < FBNIC_MAX_QUEUES; i++) {
		const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];

		fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
				      FBNIC_HW_Q_STATS_LEN, &data);
	}
	spin_unlock(&fbd->hw_stats_lock);
}
491 
492 static int fbnic_get_sset_count(struct net_device *dev, int sset)
493 {
494 	switch (sset) {
495 	case ETH_SS_STATS:
496 		return FBNIC_HW_STATS_LEN;
497 	default:
498 		return -EOPNOTSUPP;
499 	}
500 }
501 
/* Map an ethtool flow type to an index into fbn->rss_flow_hash[].
 * Modifier flags (FLOW_EXT/FLOW_MAC_EXT/FLOW_RSS) are masked off before
 * matching.  Returns -1 for unsupported flow types.
 */
static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	/* Remaining IPv4 flow types all share the IPv4 hash option */
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
		return FBNIC_IPV4_HASH_OPT;
	/* Likewise for IPv6 */
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	}

	return -1;
}
533 
534 static int
535 fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
536 {
537 	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
538 
539 	if (hash_opt_idx < 0)
540 		return -EINVAL;
541 
542 	/* Report options from rss_en table in fbn */
543 	cmd->data = fbn->rss_flow_hash[hash_opt_idx];
544 
545 	return 0;
546 }
547 
548 static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
549 				  struct ethtool_rxnfc *cmd,
550 				  u32 *rule_locs)
551 {
552 	struct fbnic_dev *fbd = fbn->fbd;
553 	int i, cnt = 0;
554 
555 	/* Report maximum rule count */
556 	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
557 
558 	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
559 		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
560 		struct fbnic_act_tcam *act_tcam;
561 
562 		act_tcam = &fbd->act_tcam[idx];
563 		if (act_tcam->state != FBNIC_TCAM_S_VALID)
564 			continue;
565 
566 		if (rule_locs) {
567 			if (cnt == cmd->rule_cnt)
568 				return -EMSGSIZE;
569 
570 			rule_locs[cnt] = i;
571 		}
572 
573 		cnt++;
574 	}
575 
576 	return cnt;
577 }
578 
/* ethtool -u <rule>: decode one action TCAM entry back into an
 * ethtool_rx_flow_spec.  The flow type is reconstructed from the
 * IP/outer-IP/L4 valid bits in TCAM word 1, addresses are looked up in
 * the MAC/IP address tables the TCAM entry indexes into, and the
 * queue/drop/RSS action is recovered from act_tcam->dest.
 */
static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		/* No IP header matched: this is an L2 (Ethernet) rule */
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			/* Mask bits are clear, so the MACDA index is
			 * significant; fetch the address it refers to.
			 */
			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		/* Outer IP matched: report as IPv6-in-IPv6 user flow */
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			/* Stored masks are inverted relative to ethtool's */
			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		/* Plain IPv6 rule; TCAM words 3/4 hold src/dst L4 ports */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		/* Plain IPv4 rule; only the last u32 of the stored
		 * address is meaningful for v4.
		 */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}
769 
/* Dispatch ethtool -n/-u read queries.  GRXCLSRLCNT deliberately falls
 * through to the GRXCLSRLALL handler with rule_locs forced to NULL so
 * that only the rule count is produced, and ORs RX_CLS_LOC_SPECIAL
 * into cmd->data for that case.
 */
static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = fbn->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fbnic_get_rss_hash_opts(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}
805 
/* RXH field sets accepted at each header layer; each level is a
 * superset of the one below it.
 */
#define FBNIC_L2_HASH_OPTIONS \
	(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
#define FBNIC_L4_HASH_OPTIONS \
	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
812 
813 static int
814 fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
815 {
816 	int hash_opt_idx;
817 
818 	/* Verify the type requested is correct */
819 	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
820 	if (hash_opt_idx < 0)
821 		return -EINVAL;
822 
823 	/* Verify the fields asked for can actually be assigned based on type */
824 	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
825 	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
826 	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
827 	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
828 	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
829 		return -EINVAL;
830 
831 	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
832 
833 	if (netif_running(fbn->netdev)) {
834 		fbnic_rss_reinit(fbn->fbd, fbn);
835 		fbnic_write_rules(fbn->fbd);
836 	}
837 
838 	return 0;
839 }
840 
841 static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
842 {
843 	int i;
844 
845 	for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
846 		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
847 
848 		if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
849 			return i;
850 	}
851 
852 	return -ENOSPC;
853 }
854 
855 static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
856 				  const struct ethtool_rxnfc *cmd)
857 {
858 	u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
859 	u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
860 	u16 misc = 0, misc_mask = ~0;
861 	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
862 			      FBNIC_RPC_ACT_TBL0_DEST_HOST);
863 	struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
864 	struct fbnic_mac_addr *mac_addr = NULL;
865 	struct ethtool_rx_flow_spec *fsp;
866 	struct fbnic_dev *fbd = fbn->fbd;
867 	struct fbnic_act_tcam *act_tcam;
868 	struct in6_addr *addr6, *mask6;
869 	struct in_addr *addr4, *mask4;
870 	int hash_idx, location;
871 	u32 flow_type;
872 	int idx, j;
873 
874 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
875 
876 	if (fsp->location != RX_CLS_LOC_ANY)
877 		return -EINVAL;
878 	location = fbnic_cls_rule_any_loc(fbd);
879 	if (location < 0)
880 		return location;
881 
882 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
883 		dest = FBNIC_RPC_ACT_TBL0_DROP;
884 	} else if (fsp->flow_type & FLOW_RSS) {
885 		if (cmd->rss_context == 1)
886 			dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
887 	} else {
888 		u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
889 
890 		if (ring_idx >= fbn->num_rx_queues)
891 			return -EINVAL;
892 
893 		dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
894 			FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
895 	}
896 
897 	idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
898 	act_tcam = &fbd->act_tcam[idx];
899 
900 	/* Do not allow overwriting for now.
901 	 * To support overwriting rules we will need to add logic to free
902 	 * any IP or MACDA TCAMs that may be associated with the old rule.
903 	 */
904 	if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
905 		return -EBUSY;
906 
907 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
908 	hash_idx = fbnic_get_rss_hash_idx(flow_type);
909 
910 	switch (flow_type) {
911 	case UDP_V4_FLOW:
912 udp4_flow:
913 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
914 		fallthrough;
915 	case TCP_V4_FLOW:
916 tcp4_flow:
917 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
918 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
919 			       FBNIC_RPC_TCAM_ACT1_L4_VALID);
920 
921 		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
922 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
923 		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
924 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
925 		goto ip4_flow;
926 	case IP_USER_FLOW:
927 		if (!fsp->m_u.usr_ip4_spec.proto)
928 			goto ip4_flow;
929 		if (fsp->m_u.usr_ip4_spec.proto != 0xff)
930 			return -EINVAL;
931 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
932 			goto udp4_flow;
933 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
934 			goto tcp4_flow;
935 		return -EINVAL;
936 ip4_flow:
937 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
938 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
939 		if (mask4->s_addr) {
940 			ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
941 						  addr4, mask4);
942 			if (!ip_src)
943 				return -ENOSPC;
944 
945 			set_bit(idx, ip_src->act_tcam);
946 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
947 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
948 					       ip_src - fbd->ip_src);
949 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
950 				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
951 		}
952 
953 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
954 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
955 		if (mask4->s_addr) {
956 			ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
957 						  addr4, mask4);
958 			if (!ip_dst) {
959 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
960 					memset(ip_src, 0, sizeof(*ip_src));
961 				return -ENOSPC;
962 			}
963 
964 			set_bit(idx, ip_dst->act_tcam);
965 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
966 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
967 					       ip_dst - fbd->ip_dst);
968 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
969 				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
970 		}
971 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
972 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
973 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
974 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
975 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
976 		break;
977 	case UDP_V6_FLOW:
978 udp6_flow:
979 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
980 		fallthrough;
981 	case TCP_V6_FLOW:
982 tcp6_flow:
983 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
984 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
985 			  FBNIC_RPC_TCAM_ACT1_L4_VALID);
986 
987 		sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
988 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
989 		dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
990 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
991 		goto ipv6_flow;
992 	case IPV6_USER_FLOW:
993 		if (!fsp->m_u.usr_ip6_spec.l4_proto)
994 			goto ipv6_flow;
995 
996 		if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
997 			return -EINVAL;
998 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
999 			goto udp6_flow;
1000 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
1001 			goto tcp6_flow;
1002 		if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
1003 			return -EINVAL;
1004 
1005 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1006 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1007 		if (!ipv6_addr_any(mask6)) {
1008 			ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
1009 						  addr6, mask6);
1010 			if (!ip_src)
1011 				return -ENOSPC;
1012 
1013 			set_bit(idx, ip_src->act_tcam);
1014 			ip_value |=
1015 				FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1016 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
1017 					   ip_src - fbd->ipo_src);
1018 			ip_mask &=
1019 				~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1020 				  FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
1021 		}
1022 
1023 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1024 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1025 		if (!ipv6_addr_any(mask6)) {
1026 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
1027 						  addr6, mask6);
1028 			if (!ip_dst) {
1029 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1030 					memset(ip_src, 0, sizeof(*ip_src));
1031 				return -ENOSPC;
1032 			}
1033 
1034 			set_bit(idx, ip_dst->act_tcam);
1035 			ip_value |=
1036 				FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1037 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
1038 					   ip_dst - fbd->ipo_dst);
1039 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1040 				     FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
1041 		}
1042 
1043 		flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1044 		flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1045 ipv6_flow:
1046 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1047 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1048 		if (!ip_src && !ipv6_addr_any(mask6)) {
1049 			ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
1050 						  addr6, mask6);
1051 			if (!ip_src)
1052 				return -ENOSPC;
1053 
1054 			set_bit(idx, ip_src->act_tcam);
1055 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1056 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
1057 					       ip_src - fbd->ip_src);
1058 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1059 				       FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
1060 		}
1061 
1062 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1063 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1064 		if (!ip_dst && !ipv6_addr_any(mask6)) {
1065 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
1066 						  addr6, mask6);
1067 			if (!ip_dst) {
1068 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1069 					memset(ip_src, 0, sizeof(*ip_src));
1070 				return -ENOSPC;
1071 			}
1072 
1073 			set_bit(idx, ip_dst->act_tcam);
1074 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1075 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
1076 					       ip_dst - fbd->ip_dst);
1077 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1078 				       FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
1079 		}
1080 
1081 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1082 			      FBNIC_RPC_TCAM_ACT1_IP_VALID |
1083 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1084 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1085 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
1086 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
1087 		break;
1088 	case ETHER_FLOW:
1089 		if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
1090 			u8 *addr = fsp->h_u.ether_spec.h_dest;
1091 			u8 *mask = fsp->m_u.ether_spec.h_dest;
1092 
1093 			/* Do not allow MAC addr of 0 */
1094 			if (is_zero_ether_addr(addr))
1095 				return -EINVAL;
1096 
1097 			/* Only support full MAC address to avoid
1098 			 * conflicts with other MAC addresses.
1099 			 */
1100 			if (!is_broadcast_ether_addr(mask))
1101 				return -EINVAL;
1102 
1103 			if (is_multicast_ether_addr(addr))
1104 				mac_addr = __fbnic_mc_sync(fbd, addr);
1105 			else
1106 				mac_addr = __fbnic_uc_sync(fbd, addr);
1107 
1108 			if (!mac_addr)
1109 				return -ENOSPC;
1110 
1111 			set_bit(idx, mac_addr->act_tcam);
1112 			flow_value |=
1113 				FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
1114 					   mac_addr - fbd->mac_addr);
1115 			flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
1116 		}
1117 
1118 		flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1119 		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1120 		break;
1121 	default:
1122 		return -EINVAL;
1123 	}
1124 
1125 	/* Write action table values */
1126 	act_tcam->dest = dest;
1127 	act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
1128 
1129 	/* Write IP Match value/mask to action_tcam[0] */
1130 	act_tcam->value.tcam[0] = ip_value;
1131 	act_tcam->mask.tcam[0] = ip_mask;
1132 
1133 	/* Write flow type value/mask to action_tcam[1] */
1134 	act_tcam->value.tcam[1] = flow_value;
1135 	act_tcam->mask.tcam[1] = flow_mask;
1136 
1137 	/* Write error, DSCP, extra L4 matches to action_tcam[2] */
1138 	act_tcam->value.tcam[2] = misc;
1139 	act_tcam->mask.tcam[2] = misc_mask;
1140 
1141 	/* Write source/destination port values */
1142 	act_tcam->value.tcam[3] = sport;
1143 	act_tcam->mask.tcam[3] = sport_mask;
1144 	act_tcam->value.tcam[4] = dport;
1145 	act_tcam->mask.tcam[4] = dport_mask;
1146 
1147 	for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
1148 		act_tcam->mask.tcam[j] = 0xffff;
1149 
1150 	act_tcam->state = FBNIC_TCAM_S_UPDATE;
1151 	fsp->location = location;
1152 
1153 	if (netif_running(fbn->netdev)) {
1154 		fbnic_write_rules(fbd);
1155 		if (ip_src || ip_dst)
1156 			fbnic_write_ip_addr(fbd);
1157 		if (mac_addr)
1158 			fbnic_write_macda(fbd);
1159 	}
1160 
1161 	return 0;
1162 }
1163 
1164 static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
1165 				  unsigned int tcam_idx)
1166 {
1167 	struct fbnic_dev *fbd = fbn->fbd;
1168 	int idx;
1169 
1170 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
1171 		__fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
1172 
1173 	/* Write updates to hardware */
1174 	if (netif_running(fbn->netdev))
1175 		fbnic_write_macda(fbd);
1176 }
1177 
1178 static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
1179 				    unsigned int tcam_idx)
1180 {
1181 	struct fbnic_dev *fbd = fbn->fbd;
1182 	int idx;
1183 
1184 	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
1185 		__fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
1186 	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
1187 		__fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
1188 	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
1189 		__fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
1190 	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
1191 		__fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
1192 
1193 	/* Write updates to hardware */
1194 	if (netif_running(fbn->netdev))
1195 		fbnic_write_ip_addr(fbd);
1196 }
1197 
1198 static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
1199 				  const struct ethtool_rxnfc *cmd)
1200 {
1201 	struct ethtool_rx_flow_spec *fsp;
1202 	struct fbnic_dev *fbd = fbn->fbd;
1203 	struct fbnic_act_tcam *act_tcam;
1204 	int idx;
1205 
1206 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1207 
1208 	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
1209 		return -EINVAL;
1210 
1211 	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
1212 	act_tcam = &fbd->act_tcam[idx];
1213 
1214 	if (act_tcam->state != FBNIC_TCAM_S_VALID)
1215 		return -EINVAL;
1216 
1217 	act_tcam->state = FBNIC_TCAM_S_DELETE;
1218 
1219 	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
1220 	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
1221 		fbnic_clear_nfc_macda(fbn, idx);
1222 
1223 	if ((act_tcam->value.tcam[0] &
1224 	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1225 	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1226 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1227 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
1228 	    (~act_tcam->mask.tcam[0] &
1229 	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
1230 	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
1231 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
1232 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
1233 		fbnic_clear_nfc_ip_addr(fbn, idx);
1234 
1235 	if (netif_running(fbn->netdev))
1236 		fbnic_write_rules(fbd);
1237 
1238 	return 0;
1239 }
1240 
1241 static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1242 {
1243 	struct fbnic_net *fbn = netdev_priv(netdev);
1244 	int ret = -EOPNOTSUPP;
1245 
1246 	switch (cmd->cmd) {
1247 	case ETHTOOL_SRXFH:
1248 		ret = fbnic_set_rss_hash_opts(fbn, cmd);
1249 		break;
1250 	case ETHTOOL_SRXCLSRLINS:
1251 		ret = fbnic_set_cls_rule_ins(fbn, cmd);
1252 		break;
1253 	case ETHTOOL_SRXCLSRLDEL:
1254 		ret = fbnic_set_cls_rule_del(fbn, cmd);
1255 		break;
1256 	}
1257 
1258 	return ret;
1259 }
1260 
/* Report the size of the RSS hash key in bytes to ethtool */
static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
}
1265 
/* Report the number of RSS indirection table entries to ethtool */
static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_TBL_SIZE;
}
1270 
1271 static int
1272 fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
1273 {
1274 	struct fbnic_net *fbn = netdev_priv(netdev);
1275 	unsigned int i;
1276 
1277 	rxfh->hfunc = ETH_RSS_HASH_TOP;
1278 
1279 	if (rxfh->key) {
1280 		for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
1281 			u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
1282 
1283 			rxfh->key[i] = rss_key >> 24;
1284 		}
1285 	}
1286 
1287 	if (rxfh->indir) {
1288 		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1289 			rxfh->indir[i] = fbn->indir_tbl[0][i];
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 static unsigned int
1296 fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
1297 {
1298 	unsigned int i, changes = 0;
1299 
1300 	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
1301 		if (fbn->indir_tbl[idx][i] == indir[i])
1302 			continue;
1303 
1304 		fbn->indir_tbl[idx][i] = indir[i];
1305 		changes++;
1306 	}
1307 
1308 	return changes;
1309 }
1310 
/* Update the RSS hash key and/or default indirection table.
 *
 * Only Toeplitz (ETH_RSS_HASH_TOP) is supported; any other explicit
 * hash function request is rejected with -EINVAL.  Hardware is only
 * reprogrammed when something actually changed and the device is up.
 */
static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		/* Walk the key bytes backwards, packing four at a time into
		 * a u32 with the earliest byte in the most significant
		 * position.  When i reaches a word boundary (i % 4 == 0)
		 * rss_key holds the complete word for rss_key[i / 4]; this
		 * also handles a key length that is not a multiple of 4,
		 * since the trailing partial word is built first.
		 */
		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}
1348 
1349 static int
1350 fbnic_modify_rxfh_context(struct net_device *netdev,
1351 			  struct ethtool_rxfh_context *ctx,
1352 			  const struct ethtool_rxfh_param *rxfh,
1353 			  struct netlink_ext_ack *extack)
1354 {
1355 	struct fbnic_net *fbn = netdev_priv(netdev);
1356 	const u32 *indir = rxfh->indir;
1357 	unsigned int changes;
1358 
1359 	if (!indir)
1360 		indir = ethtool_rxfh_context_indir(ctx);
1361 
1362 	changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
1363 	if (changes && netif_running(netdev))
1364 		fbnic_rss_reinit_hw(fbn->fbd, fbn);
1365 
1366 	return 0;
1367 }
1368 
1369 static int
1370 fbnic_create_rxfh_context(struct net_device *netdev,
1371 			  struct ethtool_rxfh_context *ctx,
1372 			  const struct ethtool_rxfh_param *rxfh,
1373 			  struct netlink_ext_ack *extack)
1374 {
1375 	struct fbnic_net *fbn = netdev_priv(netdev);
1376 
1377 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1378 		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1379 		return -EOPNOTSUPP;
1380 	}
1381 	ctx->hfunc = ETH_RSS_HASH_TOP;
1382 
1383 	if (!rxfh->indir) {
1384 		u32 *indir = ethtool_rxfh_context_indir(ctx);
1385 		unsigned int num_rx = fbn->num_rx_queues;
1386 		unsigned int i;
1387 
1388 		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1389 			indir[i] = ethtool_rxfh_indir_default(i, num_rx);
1390 	}
1391 
1392 	return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
1393 }
1394 
/* Remove an additional RSS context.  Always succeeds. */
static int
fbnic_remove_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx, u32 rss_context,
			  struct netlink_ext_ack *extack)
{
	/* Nothing to do, contexts are allocated statically */
	return 0;
}
1403 
1404 static void fbnic_get_channels(struct net_device *netdev,
1405 			       struct ethtool_channels *ch)
1406 {
1407 	struct fbnic_net *fbn = netdev_priv(netdev);
1408 	struct fbnic_dev *fbd = fbn->fbd;
1409 
1410 	ch->max_rx = fbd->max_num_queues;
1411 	ch->max_tx = fbd->max_num_queues;
1412 	ch->max_combined = min(ch->max_rx, ch->max_tx);
1413 	ch->max_other =	FBNIC_NON_NAPI_VECTORS;
1414 
1415 	if (fbn->num_rx_queues > fbn->num_napi ||
1416 	    fbn->num_tx_queues > fbn->num_napi)
1417 		ch->combined_count = min(fbn->num_rx_queues,
1418 					 fbn->num_tx_queues);
1419 	else
1420 		ch->combined_count =
1421 			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
1422 	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
1423 	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
1424 	ch->other_count = FBNIC_NON_NAPI_VECTORS;
1425 }
1426 
1427 static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
1428 			     unsigned int max_napis)
1429 {
1430 	fbn->num_rx_queues = ch->rx_count + ch->combined_count;
1431 	fbn->num_tx_queues = ch->tx_count + ch->combined_count;
1432 	fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
1433 			    max_napis);
1434 }
1435 
/* Reconfigure the rx/tx/combined channel counts.
 *
 * If the interface is down the new counts are simply recorded and the
 * indirection table reset.  If it is up, the new configuration is first
 * staged on a clone so allocation failures leave the running device
 * untouched; only once all resources exist is the device quiesced and
 * the configurations swapped.
 */
static int fbnic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int max_napis, standalone;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_net *clone;
	int err;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
	standalone = ch->rx_count + ch->tx_count;

	/* Limits for standalone queues:
	 *  - each queue has its own NAPI (num_napi >= rx + tx + combined)
	 *  - combining queues (combined not 0, rx or tx must be 0)
	 */
	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
	    (standalone && standalone + ch->combined_count > max_napis) ||
	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
		return -EINVAL;

	/* Device is down: just record the new layout */
	if (!netif_running(netdev)) {
		fbnic_set_queues(fbn, ch, max_napis);
		fbnic_reset_indir_tbl(fbn);
		return 0;
	}

	/* Stage the new layout on a clone of the netdev private data */
	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_queues(clone, ch, max_napis);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	/* Quiesce the device before swapping in the new configuration */
	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	/* Reset RSS indirection table */
	fbnic_reset_indir_tbl(fbn);

	fbnic_up(fbn);

	/* Release the staging clone */
	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Restart the stack with the original configuration */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
1514 
1515 static int
1516 fbnic_get_ts_info(struct net_device *netdev,
1517 		  struct kernel_ethtool_ts_info *tsinfo)
1518 {
1519 	struct fbnic_net *fbn = netdev_priv(netdev);
1520 
1521 	tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
1522 
1523 	tsinfo->so_timestamping =
1524 		SOF_TIMESTAMPING_TX_SOFTWARE |
1525 		SOF_TIMESTAMPING_TX_HARDWARE |
1526 		SOF_TIMESTAMPING_RX_HARDWARE |
1527 		SOF_TIMESTAMPING_RAW_HARDWARE;
1528 
1529 	tsinfo->tx_types =
1530 		BIT(HWTSTAMP_TX_OFF) |
1531 		BIT(HWTSTAMP_TX_ON);
1532 
1533 	tsinfo->rx_filters =
1534 		BIT(HWTSTAMP_FILTER_NONE) |
1535 		BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1536 		BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1537 		BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1538 		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1539 		BIT(HWTSTAMP_FILTER_ALL);
1540 
1541 	return 0;
1542 }
1543 
1544 static void fbnic_get_ts_stats(struct net_device *netdev,
1545 			       struct ethtool_ts_stats *ts_stats)
1546 {
1547 	struct fbnic_net *fbn = netdev_priv(netdev);
1548 	u64 ts_packets, ts_lost;
1549 	struct fbnic_ring *ring;
1550 	unsigned int start;
1551 	int i;
1552 
1553 	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
1554 	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
1555 	for (i = 0; i < fbn->num_tx_queues; i++) {
1556 		ring = fbn->tx[i];
1557 		do {
1558 			start = u64_stats_fetch_begin(&ring->stats.syncp);
1559 			ts_packets = ring->stats.twq.ts_packets;
1560 			ts_lost = ring->stats.twq.ts_lost;
1561 		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
1562 		ts_stats->pkts += ts_packets;
1563 		ts_stats->lost += ts_lost;
1564 	}
1565 }
1566 
1567 static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
1568 {
1569 	if (counter->reported)
1570 		*stat = counter->value;
1571 }
1572 
1573 static void
1574 fbnic_get_eth_mac_stats(struct net_device *netdev,
1575 			struct ethtool_eth_mac_stats *eth_mac_stats)
1576 {
1577 	struct fbnic_net *fbn = netdev_priv(netdev);
1578 	struct fbnic_mac_stats *mac_stats;
1579 	struct fbnic_dev *fbd = fbn->fbd;
1580 	const struct fbnic_mac *mac;
1581 
1582 	mac_stats = &fbd->hw_stats.mac;
1583 	mac = fbd->mac;
1584 
1585 	mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
1586 
1587 	fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
1588 			  &mac_stats->eth_mac.FramesTransmittedOK);
1589 	fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
1590 			  &mac_stats->eth_mac.FramesReceivedOK);
1591 	fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
1592 			  &mac_stats->eth_mac.FrameCheckSequenceErrors);
1593 	fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
1594 			  &mac_stats->eth_mac.AlignmentErrors);
1595 	fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
1596 			  &mac_stats->eth_mac.OctetsTransmittedOK);
1597 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
1598 			  &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
1599 	fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
1600 			  &mac_stats->eth_mac.OctetsReceivedOK);
1601 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
1602 			  &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
1603 	fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
1604 			  &mac_stats->eth_mac.MulticastFramesXmittedOK);
1605 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
1606 			  &mac_stats->eth_mac.BroadcastFramesXmittedOK);
1607 	fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
1608 			  &mac_stats->eth_mac.MulticastFramesReceivedOK);
1609 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
1610 			  &mac_stats->eth_mac.BroadcastFramesReceivedOK);
1611 	fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
1612 			  &mac_stats->eth_mac.FrameTooLongErrors);
1613 }
1614 
/* Ethtool entry points implemented by fbnic */
static const struct ethtool_ops fbnic_ethtool_ops = {
	.supported_coalesce_params	=
				  ETHTOOL_COALESCE_USECS |
				  ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.rxfh_max_num_contexts	= FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo		= fbnic_get_drvinfo,
	.get_regs_len		= fbnic_get_regs_len,
	.get_regs		= fbnic_get_regs,
	.get_coalesce		= fbnic_get_coalesce,
	.set_coalesce		= fbnic_set_coalesce,
	.get_ringparam		= fbnic_get_ringparam,
	.set_ringparam		= fbnic_set_ringparam,
	.get_strings		= fbnic_get_strings,
	.get_ethtool_stats	= fbnic_get_ethtool_stats,
	.get_sset_count		= fbnic_get_sset_count,
	.get_rxnfc		= fbnic_get_rxnfc,
	.set_rxnfc		= fbnic_set_rxnfc,
	.get_rxfh_key_size	= fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size	= fbnic_get_rxfh_indir_size,
	.get_rxfh		= fbnic_get_rxfh,
	.set_rxfh		= fbnic_set_rxfh,
	.create_rxfh_context	= fbnic_create_rxfh_context,
	.modify_rxfh_context	= fbnic_modify_rxfh_context,
	.remove_rxfh_context	= fbnic_remove_rxfh_context,
	.get_channels		= fbnic_get_channels,
	.set_channels		= fbnic_set_channels,
	.get_ts_info		= fbnic_get_ts_info,
	.get_ts_stats		= fbnic_get_ts_stats,
	.get_eth_mac_stats	= fbnic_get_eth_mac_stats,
};
1645 
/* Attach the fbnic ethtool operations to @dev during netdev setup */
void fbnic_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fbnic_ethtool_ops;
}
1650