xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2) !
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #include "cn20k/reg.h"
25 #include "cn20k/debugfs.h"
26 #include "cn20k/npc.h"
27 
28 #define DEBUGFS_DIR_NAME "octeontx2"
29 
/* Generic per-MAC statistics counter indices.  The same indices are used
 * as designated initializers for the human-readable description tables
 * below (cgx_rx_stats_fields[] / cgx_tx_stats_fields[]); RX uses
 * CGX_STAT0..CGX_STAT12, TX uses CGX_STAT0..CGX_STAT17.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
51 
/* Description strings for the CGX RX counters, indexed by the CGX_STATx
 * values above; printed verbatim to debugfs, so treat them as ABI-ish.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
67 
/* Description strings for the CGX TX counters, indexed by the CGX_STATx
 * values above; printed verbatim to debugfs.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
88 
/* Description strings for the RPM RX counters, in hardware counter order;
 * printed verbatim to debugfs.
 * Fixed two garbled labels: "with out error" -> "without error" and
 * "a1nrange" -> "in-range".
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
134 
/* Description strings for the RPM TX counters, in hardware counter order;
 * printed verbatim to debugfs.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
171 
/* CPT engine group types (values match what the hardware reports;
 * AE/SE/IE naming follows the CPT block documentation).
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
177 
/* Stand-ins so that RVU_DEBUG_SEQ_FOPS(name, read, NULL) expands the
 * unused write/open slot to a plain NULL function pointer.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Generate a seq_file based file_operations for a debugfs entry:
 * declares rvu_dbg_open_<name>() wrapping single_open() around
 * rvu_dbg_<read_op>(), and rvu_dbg_<name>_fops wiring in the optional
 * rvu_dbg_<write_op> (pass NULL for read-only files).
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}
194 
/* Generate a raw (non seq_file) file_operations for debugfs entries whose
 * read handler does its own buffering/copy_to_user.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
202 
203 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
204 
205 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
206 {
207 	struct mcs *mcs = filp->private;
208 	struct mcs_port_stats stats;
209 	int lmac;
210 
211 	seq_puts(filp, "\n port stats\n");
212 	mutex_lock(&mcs->stats_lock);
213 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
214 		mcs_get_port_stats(mcs, &stats, lmac, dir);
215 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
216 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
217 
218 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
219 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
220 				   stats.preempt_err_cnt);
221 		if (dir == MCS_TX)
222 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
223 				   stats.sectag_insert_err_cnt);
224 	}
225 	mutex_unlock(&mcs->stats_lock);
226 	return 0;
227 }
228 
/* seq_file show handler: MCS RX per-port stats */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
235 
/* seq_file show handler: MCS TX per-port stats */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
242 
243 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
244 {
245 	struct mcs *mcs = filp->private;
246 	struct mcs_sa_stats stats;
247 	struct rsrc_bmap *map;
248 	int sa_id;
249 
250 	if (dir == MCS_TX) {
251 		map = &mcs->tx.sa;
252 		mutex_lock(&mcs->stats_lock);
253 		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
254 			seq_puts(filp, "\n TX SA stats\n");
255 			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
256 			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
257 				   stats.pkt_encrypt_cnt);
258 
259 			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
260 				   stats.pkt_protected_cnt);
261 		}
262 		mutex_unlock(&mcs->stats_lock);
263 		return 0;
264 	}
265 
266 	/* RX stats */
267 	map = &mcs->rx.sa;
268 	mutex_lock(&mcs->stats_lock);
269 	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
270 		seq_puts(filp, "\n RX SA stats\n");
271 		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
272 		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
273 		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
274 		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
275 		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
276 		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
277 	}
278 	mutex_unlock(&mcs->stats_lock);
279 	return 0;
280 }
281 
/* seq_file show handler: MCS RX per-SA stats */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
288 
/* seq_file show handler: MCS TX per-SA stats */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
295 
296 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
297 {
298 	struct mcs *mcs = filp->private;
299 	struct mcs_sc_stats stats;
300 	struct rsrc_bmap *map;
301 	int sc_id;
302 
303 	map = &mcs->tx.sc;
304 	seq_puts(filp, "\n SC stats\n");
305 
306 	mutex_lock(&mcs->stats_lock);
307 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
308 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
309 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
310 		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
311 		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
312 
313 		if (mcs->hw->mcs_blks == 1) {
314 			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
315 				   stats.octet_encrypt_cnt);
316 			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
317 				   stats.octet_protected_cnt);
318 		}
319 	}
320 	mutex_unlock(&mcs->stats_lock);
321 	return 0;
322 }
323 
324 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
325 
/* Dump per-SC (secure channel) MCS RX statistics for every SC in use.
 * Some counters are specific to single-block vs multi-block MCS variants.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		/* Delay/ok counters only exist on multi-block MCS */
		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		/* Octet counters only exist on single-block MCS */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
360 
361 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
362 
363 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
364 {
365 	struct mcs *mcs = filp->private;
366 	struct mcs_flowid_stats stats;
367 	struct rsrc_bmap *map;
368 	int flow_id;
369 
370 	seq_puts(filp, "\n Flowid stats\n");
371 
372 	if (dir == MCS_RX)
373 		map = &mcs->rx.flow_ids;
374 	else
375 		map = &mcs->tx.flow_ids;
376 
377 	mutex_lock(&mcs->stats_lock);
378 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
379 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
380 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
381 	}
382 	mutex_unlock(&mcs->stats_lock);
383 	return 0;
384 }
385 
/* seq_file show handler: MCS TX flow-id TCAM hit stats */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
392 
/* seq_file show handler: MCS RX flow-id TCAM hit stats */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
399 
400 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
401 {
402 	struct mcs *mcs = filp->private;
403 	struct mcs_secy_stats stats;
404 	struct rsrc_bmap *map;
405 	int secy_id;
406 
407 	map = &mcs->tx.secy;
408 	seq_puts(filp, "\n MCS TX secy stats\n");
409 
410 	mutex_lock(&mcs->stats_lock);
411 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
412 		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
413 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
414 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
415 			   stats.ctl_pkt_bcast_cnt);
416 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
417 			   stats.ctl_pkt_mcast_cnt);
418 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
419 			   stats.ctl_pkt_ucast_cnt);
420 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
421 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
422 			   stats.unctl_pkt_bcast_cnt);
423 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
424 			   stats.unctl_pkt_mcast_cnt);
425 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
426 			   stats.unctl_pkt_ucast_cnt);
427 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
428 		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
429 			   stats.octet_encrypted_cnt);
430 		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
431 			   stats.octet_protected_cnt);
432 		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
433 			   stats.pkt_noactivesa_cnt);
434 		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
435 		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
436 	}
437 	mutex_unlock(&mcs->stats_lock);
438 	return 0;
439 }
440 
441 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
442 
443 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
444 {
445 	struct mcs *mcs = filp->private;
446 	struct mcs_secy_stats stats;
447 	struct rsrc_bmap *map;
448 	int secy_id;
449 
450 	map = &mcs->rx.secy;
451 	seq_puts(filp, "\n MCS secy stats\n");
452 
453 	mutex_lock(&mcs->stats_lock);
454 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
455 		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
456 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
457 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
458 			   stats.ctl_pkt_bcast_cnt);
459 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
460 			   stats.ctl_pkt_mcast_cnt);
461 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
462 			   stats.ctl_pkt_ucast_cnt);
463 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
464 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
465 			   stats.unctl_pkt_bcast_cnt);
466 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
467 			   stats.unctl_pkt_mcast_cnt);
468 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
469 			   stats.unctl_pkt_ucast_cnt);
470 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
471 		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
472 			   stats.octet_decrypted_cnt);
473 		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
474 			   stats.octet_validated_cnt);
475 		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
476 			   stats.pkt_port_disabled_cnt);
477 		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
478 		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
479 			   stats.pkt_nosa_cnt);
480 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
481 			   stats.pkt_nosaerror_cnt);
482 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
483 			   stats.pkt_tagged_ctl_cnt);
484 		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
485 		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
486 		if (mcs->hw->mcs_blks > 1)
487 			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
488 				   stats.pkt_notag_cnt);
489 	}
490 	mutex_unlock(&mcs->stats_lock);
491 	return 0;
492 }
493 
494 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
495 
496 static void rvu_dbg_mcs_init(struct rvu *rvu)
497 {
498 	struct mcs *mcs;
499 	char dname[10];
500 	int i;
501 
502 	if (!rvu->mcs_blk_cnt)
503 		return;
504 
505 	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
506 
507 	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
508 		mcs = mcs_get_pdata(i);
509 
510 		sprintf(dname, "mcs%d", i);
511 		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
512 						      rvu->rvu_dbg.mcs_root);
513 
514 		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
515 
516 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
517 				    &rvu_dbg_mcs_rx_flowid_stats_fops);
518 
519 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
520 				    &rvu_dbg_mcs_rx_secy_stats_fops);
521 
522 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
523 				    &rvu_dbg_mcs_rx_sc_stats_fops);
524 
525 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
526 				    &rvu_dbg_mcs_rx_sa_stats_fops);
527 
528 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
529 				    &rvu_dbg_mcs_rx_port_stats_fops);
530 
531 		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
532 
533 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
534 				    &rvu_dbg_mcs_tx_flowid_stats_fops);
535 
536 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
537 				    &rvu_dbg_mcs_tx_secy_stats_fops);
538 
539 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
540 				    &rvu_dbg_mcs_tx_sc_stats_fops);
541 
542 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
543 				    &rvu_dbg_mcs_tx_sa_stats_fops);
544 
545 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
546 				    &rvu_dbg_mcs_tx_port_stats_fops);
547 	}
548 }
549 
550 #define LMT_MAPTBL_ENTRY_SIZE 16
551 /* Dump LMTST map table */
552 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
553 					       char __user *buffer,
554 					       size_t count, loff_t *ppos)
555 {
556 	struct rvu *rvu = filp->private_data;
557 	u64 lmt_addr, val, tbl_base;
558 	int pf, vf, num_vfs, hw_vfs;
559 	void __iomem *lmt_map_base;
560 	int apr_pfs, apr_vfs;
561 	int buf_size = 10240;
562 	size_t off = 0;
563 	int index = 0;
564 	char *buf;
565 	int ret;
566 
567 	/* don't allow partial reads */
568 	if (*ppos != 0)
569 		return 0;
570 
571 	buf = kzalloc(buf_size, GFP_KERNEL);
572 	if (!buf)
573 		return -ENOMEM;
574 
575 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
576 	val  = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
577 	apr_vfs = 1 << (val & 0xF);
578 	apr_pfs = 1 << ((val >> 4) & 0x7);
579 
580 	lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
581 				  LMT_MAPTBL_ENTRY_SIZE);
582 	if (!lmt_map_base) {
583 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
584 		kfree(buf);
585 		return false;
586 	}
587 
588 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
589 			  "\n\t\t\t\t\tLmtst Map Table Entries");
590 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
591 			  "\n\t\t\t\t\t=======================");
592 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
593 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
594 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
595 			  "Lmtline Base (word 0)\t\t");
596 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
597 			  "Lmt Map Entry (word 1)");
598 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
599 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
600 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
601 				    pf);
602 
603 		index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
604 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
605 				 (tbl_base + index));
606 		lmt_addr = readq(lmt_map_base + index);
607 		off += scnprintf(&buf[off], buf_size - 1 - off,
608 				 " 0x%016llx\t\t", lmt_addr);
609 		index += 8;
610 		val = readq(lmt_map_base + index);
611 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
612 				 val);
613 		/* Reading num of VFs per PF */
614 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
615 		for (vf = 0; vf < num_vfs; vf++) {
616 			index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
617 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
618 			off += scnprintf(&buf[off], buf_size - 1 - off,
619 					    "PF%d:VF%d  \t\t", pf, vf);
620 			off += scnprintf(&buf[off], buf_size - 1 - off,
621 					 " 0x%llx\t\t", (tbl_base + index));
622 			lmt_addr = readq(lmt_map_base + index);
623 			off += scnprintf(&buf[off], buf_size - 1 - off,
624 					 " 0x%016llx\t\t", lmt_addr);
625 			index += 8;
626 			val = readq(lmt_map_base + index);
627 			off += scnprintf(&buf[off], buf_size - 1 - off,
628 					 " 0x%016llx\n", val);
629 		}
630 	}
631 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
632 
633 	ret = min(off, count);
634 	if (copy_to_user(buffer, buf, ret))
635 		ret = -EFAULT;
636 	kfree(buf);
637 
638 	iounmap(lmt_map_base);
639 	if (ret < 0)
640 		return ret;
641 
642 	*ppos = ret;
643 	return ret;
644 }
645 
646 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
647 
/* Render the LFs of @block that are attached to @pcifunc into @lfs as a
 * comma-separated list, compressing consecutive LF numbers into
 * "first-last" ranges (e.g. "0-3,7,9-10"). @lfs must be large enough for
 * the longest possible list - see get_max_column_width().
 */
static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out of range so the first match never extends a run */
	int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;

	for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
		if (lf >= block->lf.max)
			break;

		if (block->fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Extends the current run; emit nothing yet */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		/* Close the previous run (if any) and start a new one */
		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run that reached the end of the bitmap */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
681 
682 static int get_max_column_width(struct rvu *rvu)
683 {
684 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
685 	struct rvu_block block;
686 	u16 pcifunc;
687 	char *buf;
688 
689 	buf = kzalloc(buf_size, GFP_KERNEL);
690 	if (!buf)
691 		return -ENOMEM;
692 
693 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
694 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
695 			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
696 			if (!pcifunc)
697 				continue;
698 
699 			for (index = 0; index < BLK_COUNT; index++) {
700 				block = rvu->hw->block[index];
701 				if (!strlen(block.name))
702 					continue;
703 
704 				get_lf_str_list(&block, pcifunc, buf);
705 				if (lf_str_size <= strlen(buf))
706 					lf_str_size = strlen(buf) + 1;
707 			}
708 		}
709 	}
710 
711 	kfree(buf);
712 	return lf_str_size;
713 }
714 
715 /* Dumps current provisioning status of all RVU block LFs */
716 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
717 					  char __user *buffer,
718 					  size_t count, loff_t *ppos)
719 {
720 	int index, off = 0, flag = 0, len = 0, i = 0;
721 	struct rvu *rvu = filp->private_data;
722 	int bytes_not_copied = 0;
723 	struct rvu_block block;
724 	int pf, vf, pcifunc;
725 	int buf_size = 2048;
726 	int lf_str_size;
727 	char *lfs;
728 	char *buf;
729 
730 	/* don't allow partial reads */
731 	if (*ppos != 0)
732 		return 0;
733 
734 	buf = kzalloc(buf_size, GFP_KERNEL);
735 	if (!buf)
736 		return -ENOMEM;
737 
738 	/* Get the maximum width of a column */
739 	lf_str_size = get_max_column_width(rvu);
740 
741 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
742 	if (!lfs) {
743 		kfree(buf);
744 		return -ENOMEM;
745 	}
746 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
747 			  "pcifunc");
748 	for (index = 0; index < BLK_COUNT; index++)
749 		if (strlen(rvu->hw->block[index].name)) {
750 			off += scnprintf(&buf[off], buf_size - 1 - off,
751 					 "%-*s", lf_str_size,
752 					 rvu->hw->block[index].name);
753 		}
754 
755 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
756 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
757 	if (bytes_not_copied)
758 		goto out;
759 
760 	i++;
761 	*ppos += off;
762 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
763 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
764 			off = 0;
765 			flag = 0;
766 			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
767 			if (!pcifunc)
768 				continue;
769 
770 			if (vf) {
771 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
772 				off = scnprintf(&buf[off],
773 						buf_size - 1 - off,
774 						"%-*s", lf_str_size, lfs);
775 			} else {
776 				sprintf(lfs, "PF%d", pf);
777 				off = scnprintf(&buf[off],
778 						buf_size - 1 - off,
779 						"%-*s", lf_str_size, lfs);
780 			}
781 
782 			for (index = 0; index < BLK_COUNT; index++) {
783 				block = rvu->hw->block[index];
784 				if (!strlen(block.name))
785 					continue;
786 				len = 0;
787 				lfs[len] = '\0';
788 				get_lf_str_list(&block, pcifunc, lfs);
789 				if (strlen(lfs))
790 					flag = 1;
791 
792 				off += scnprintf(&buf[off], buf_size - 1 - off,
793 						 "%-*s", lf_str_size, lfs);
794 			}
795 			if (flag) {
796 				off +=	scnprintf(&buf[off],
797 						  buf_size - 1 - off, "\n");
798 				bytes_not_copied = copy_to_user(buffer +
799 								(i * off),
800 								buf, off);
801 				if (bytes_not_copied)
802 					goto out;
803 
804 				i++;
805 				*ppos += off;
806 			}
807 		}
808 	}
809 
810 out:
811 	kfree(lfs);
812 	kfree(buf);
813 	if (bytes_not_copied)
814 		return -EFAULT;
815 
816 	return *ppos;
817 }
818 
819 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
820 
821 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
822 {
823 	char cgx[10], lmac[10], chan[10];
824 	struct rvu *rvu = filp->private;
825 	struct pci_dev *pdev = NULL;
826 	struct mac_ops *mac_ops;
827 	struct rvu_pfvf *pfvf;
828 	int pf, domain, blkid;
829 	u8 cgx_id, lmac_id;
830 	u16 pcifunc;
831 
832 	domain = 2;
833 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
834 	/* There can be no CGX devices at all */
835 	if (!mac_ops)
836 		return 0;
837 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
838 		   mac_ops->name);
839 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
840 		if (!is_pf_cgxmapped(rvu, pf))
841 			continue;
842 
843 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
844 		if (!pdev)
845 			continue;
846 
847 		cgx[0] = 0;
848 		lmac[0] = 0;
849 		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
850 		pfvf = rvu_get_pfvf(rvu, pcifunc);
851 
852 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
853 			blkid = 0;
854 		else
855 			blkid = 1;
856 
857 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
858 				    &lmac_id);
859 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
860 		sprintf(lmac, "LMAC%d", lmac_id);
861 		sprintf(chan, "%d",
862 			rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
863 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
864 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
865 			   chan);
866 
867 		pci_dev_put(pdev);
868 	}
869 	return 0;
870 }
871 
872 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
873 
/* Dump firmware-provided data: PTP clock info, SDP channel configuration
 * and the firmware-provisioned PF/VF MAC address tables.
 * Returns -EAGAIN when firmware data has not been populated (yet).
 */
static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
{
	struct rvu *rvu = s->private;
	struct rvu_fwdata *fwdata;
	u8 mac[ETH_ALEN];
	int count = 0, i;

	if (!rvu->fwdata)
		return -EAGAIN;

	fwdata = rvu->fwdata;
	seq_puts(s, "\nRVU Firmware Data:\n");
	seq_puts(s, "\n\t\tPTP INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\texternal clockrate \t :%x\n",
		   fwdata->ptp_ext_clk_rate);
	seq_printf(s, "\t\texternal timestamp \t :%x\n",
		   fwdata->ptp_ext_tstamp);
	seq_puts(s, "\n");

	seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
	seq_puts(s, "\t\t=======================\n");
	seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
	seq_printf(s, "\t\tNode ID \t\t :%x\n",
		   fwdata->channel_data.info.node_id);
	seq_printf(s, "\t\tNumber of VFs  \t\t :%x\n",
		   fwdata->channel_data.info.max_vfs);
	seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
		   fwdata->channel_data.info.num_pf_rings);
	seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
		   fwdata->channel_data.info.pf_srn);
	seq_puts(s, "\n");

	/* List only the PF MAC slots that carry a non-zero address */
	seq_puts(s, "\n\t\tPF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	for (i = 0; i < PF_MACNUM_MAX; i++) {
		u64_to_ether_addr(fwdata->pf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	/* Same again for the VF MAC slots */
	seq_puts(s, "\n\t\tVF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	count = 0;
	for (i = 0; i < VF_MACNUM_MAX; i++) {
		u64_to_ether_addr(fwdata->vf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	return 0;
}
936 
937 RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
938 
939 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
940 				u16 *pcifunc)
941 {
942 	struct rvu_block *block;
943 	struct rvu_hwinfo *hw;
944 
945 	hw = rvu->hw;
946 	block = &hw->block[blkaddr];
947 
948 	if (lf < 0 || lf >= block->lf.max) {
949 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
950 			 block->lf.max - 1);
951 		return false;
952 	}
953 
954 	*pcifunc = block->fn_map[lf];
955 	if (!*pcifunc) {
956 		dev_warn(rvu->dev,
957 			 "This LF is not attached to any RVU PFFUNC\n");
958 		return false;
959 	}
960 	return true;
961 }
962 
963 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
964 {
965 	if (!pfvf->aura_ctx) {
966 		seq_puts(m, "Aura context is not initialized\n");
967 	} else {
968 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
969 		seq_printf(m, "Aura context ena/dis bitmap : %*pb\n",
970 			   pfvf->aura_ctx->qsize, pfvf->aura_bmap);
971 	}
972 
973 	if (!pfvf->pool_ctx) {
974 		seq_puts(m, "Pool context is not initialized\n");
975 	} else {
976 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
977 		seq_printf(m, "Pool context ena/dis bitmap : %*pb\n",
978 			   pfvf->pool_ctx->qsize, pfvf->pool_bmap);
979 	}
980 }
981 
982 /* The 'qsize' entry dumps current Aura/Pool context Qsize
983  * and each context's current enable/disable status in a bitmap.
984  */
985 static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
986 				 int blktype)
987 {
988 	void (*print_qsize)(struct seq_file *s,
989 			    struct rvu_pfvf *pfvf) = NULL;
990 	struct rvu_pfvf *pfvf;
991 	struct rvu *rvu;
992 	int qsize_id;
993 	u16 pcifunc;
994 	int blkaddr;
995 
996 	rvu = s->private;
997 	switch (blktype) {
998 	case BLKTYPE_NPA:
999 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
1000 		print_qsize = print_npa_qsize;
1001 		break;
1002 
1003 	case BLKTYPE_NIX:
1004 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
1005 		print_qsize = print_nix_qsize;
1006 		break;
1007 
1008 	default:
1009 		return -EINVAL;
1010 	}
1011 
1012 	if (blktype == BLKTYPE_NPA)
1013 		blkaddr = BLKADDR_NPA;
1014 	else
1015 		blkaddr = debugfs_get_aux_num(s->file);
1016 
1017 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
1018 		return -EINVAL;
1019 
1020 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1021 	print_qsize(s, pfvf);
1022 
1023 	return 0;
1024 }
1025 
1026 static ssize_t rvu_dbg_qsize_write(struct file *file,
1027 				   const char __user *buffer, size_t count,
1028 				   loff_t *ppos, int blktype)
1029 {
1030 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
1031 	struct seq_file *seqfile = file->private_data;
1032 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
1033 	struct rvu *rvu = seqfile->private;
1034 	int blkaddr;
1035 	u16 pcifunc;
1036 	int ret, lf;
1037 
1038 	cmd_buf = memdup_user_nul(buffer, count);
1039 	if (IS_ERR(cmd_buf))
1040 		return -ENOMEM;
1041 
1042 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1043 	if (cmd_buf_tmp) {
1044 		*cmd_buf_tmp = '\0';
1045 		count = cmd_buf_tmp - cmd_buf + 1;
1046 	}
1047 
1048 	cmd_buf_tmp = cmd_buf;
1049 	subtoken = strsep(&cmd_buf, " ");
1050 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1051 	if (cmd_buf)
1052 		ret = -EINVAL;
1053 
1054 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1055 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1056 		goto qsize_write_done;
1057 	}
1058 
1059 	if (blktype == BLKTYPE_NPA)
1060 		blkaddr = BLKADDR_NPA;
1061 	else
1062 		blkaddr = debugfs_get_aux_num(file);
1063 
1064 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1065 		ret = -EINVAL;
1066 		goto qsize_write_done;
1067 	}
1068 	if (blktype  == BLKTYPE_NPA)
1069 		rvu->rvu_dbg.npa_qsize_id = lf;
1070 	else
1071 		rvu->rvu_dbg.nix_qsize_id = lf;
1072 
1073 qsize_write_done:
1074 	kfree(cmd_buf_tmp);
1075 	return ret ? ret : count;
1076 }
1077 
1078 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1079 				       const char __user *buffer,
1080 				       size_t count, loff_t *ppos)
1081 {
1082 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1083 					    BLKTYPE_NPA);
1084 }
1085 
1086 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1087 {
1088 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1089 }
1090 
1091 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1092 
/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	/* cn20k has a different aura context layout; use its dedicated dump */
	if (is_cn20k(rvu->pdev)) {
		print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	/* Field-by-field dump, grouped by 64-bit context word (W0..W6) */
	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1145 
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	/* cn20k has a different pool context layout; use its dedicated dump */
	if (is_cn20k(rvu->pdev)) {
		print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	/* Field-by-field dump, grouped by 64-bit context word (W0..W8) */
	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1197 
1198 /* Reads aura/pool's ctx from admin queue */
1199 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
1200 {
1201 	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
1202 	struct npa_aq_enq_req aq_req;
1203 	struct npa_aq_enq_rsp rsp;
1204 	struct rvu_pfvf *pfvf;
1205 	int aura, rc, max_id;
1206 	int npalf, id, all;
1207 	struct rvu *rvu;
1208 	u16 pcifunc;
1209 
1210 	rvu = m->private;
1211 
1212 	switch (ctype) {
1213 	case NPA_AQ_CTYPE_AURA:
1214 		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
1215 		id = rvu->rvu_dbg.npa_aura_ctx.id;
1216 		all = rvu->rvu_dbg.npa_aura_ctx.all;
1217 		break;
1218 
1219 	case NPA_AQ_CTYPE_POOL:
1220 		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
1221 		id = rvu->rvu_dbg.npa_pool_ctx.id;
1222 		all = rvu->rvu_dbg.npa_pool_ctx.all;
1223 		break;
1224 	default:
1225 		return -EINVAL;
1226 	}
1227 
1228 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1229 		return -EINVAL;
1230 
1231 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1232 	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
1233 		seq_puts(m, "Aura context is not initialized\n");
1234 		return -EINVAL;
1235 	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
1236 		seq_puts(m, "Pool context is not initialized\n");
1237 		return -EINVAL;
1238 	}
1239 
1240 	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
1241 	aq_req.hdr.pcifunc = pcifunc;
1242 	aq_req.ctype = ctype;
1243 	aq_req.op = NPA_AQ_INSTOP_READ;
1244 	if (ctype == NPA_AQ_CTYPE_AURA) {
1245 		max_id = pfvf->aura_ctx->qsize;
1246 		print_npa_ctx = print_npa_aura_ctx;
1247 	} else {
1248 		max_id = pfvf->pool_ctx->qsize;
1249 		print_npa_ctx = print_npa_pool_ctx;
1250 	}
1251 
1252 	if (id < 0 || id >= max_id) {
1253 		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
1254 			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1255 			max_id - 1);
1256 		return -EINVAL;
1257 	}
1258 
1259 	if (all)
1260 		id = 0;
1261 	else
1262 		max_id = id + 1;
1263 
1264 	for (aura = id; aura < max_id; aura++) {
1265 		aq_req.aura_id = aura;
1266 
1267 		/* Skip if queue is uninitialized */
1268 		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
1269 			continue;
1270 
1271 		seq_printf(m, "======%s : %d=======\n",
1272 			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
1273 			aq_req.aura_id);
1274 		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
1275 		if (rc) {
1276 			seq_puts(m, "Failed to read context\n");
1277 			return -EINVAL;
1278 		}
1279 		print_npa_ctx(m, &rsp);
1280 	}
1281 	return 0;
1282 }
1283 
1284 static int write_npa_ctx(struct rvu *rvu, bool all,
1285 			 int npalf, int id, int ctype)
1286 {
1287 	struct rvu_pfvf *pfvf;
1288 	int max_id = 0;
1289 	u16 pcifunc;
1290 
1291 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1292 		return -EINVAL;
1293 
1294 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1295 
1296 	if (ctype == NPA_AQ_CTYPE_AURA) {
1297 		if (!pfvf->aura_ctx) {
1298 			dev_warn(rvu->dev, "Aura context is not initialized\n");
1299 			return -EINVAL;
1300 		}
1301 		max_id = pfvf->aura_ctx->qsize;
1302 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
1303 		if (!pfvf->pool_ctx) {
1304 			dev_warn(rvu->dev, "Pool context is not initialized\n");
1305 			return -EINVAL;
1306 		}
1307 		max_id = pfvf->pool_ctx->qsize;
1308 	}
1309 
1310 	if (id < 0 || id >= max_id) {
1311 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1312 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1313 			max_id - 1);
1314 		return -EINVAL;
1315 	}
1316 
1317 	switch (ctype) {
1318 	case NPA_AQ_CTYPE_AURA:
1319 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1320 		rvu->rvu_dbg.npa_aura_ctx.id = id;
1321 		rvu->rvu_dbg.npa_aura_ctx.all = all;
1322 		break;
1323 
1324 	case NPA_AQ_CTYPE_POOL:
1325 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1326 		rvu->rvu_dbg.npa_pool_ctx.id = id;
1327 		rvu->rvu_dbg.npa_pool_ctx.all = all;
1328 		break;
1329 	default:
1330 		return -EINVAL;
1331 	}
1332 	return 0;
1333 }
1334 
1335 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1336 				const char __user *buffer, int *npalf,
1337 				int *id, bool *all)
1338 {
1339 	int bytes_not_copied;
1340 	char *cmd_buf_tmp;
1341 	char *subtoken;
1342 	int ret;
1343 
1344 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1345 	if (bytes_not_copied)
1346 		return -EFAULT;
1347 
1348 	cmd_buf[*count] = '\0';
1349 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1350 
1351 	if (cmd_buf_tmp) {
1352 		*cmd_buf_tmp = '\0';
1353 		*count = cmd_buf_tmp - cmd_buf + 1;
1354 	}
1355 
1356 	subtoken = strsep(&cmd_buf, " ");
1357 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1358 	if (ret < 0)
1359 		return ret;
1360 	subtoken = strsep(&cmd_buf, " ");
1361 	if (subtoken && strcmp(subtoken, "all") == 0) {
1362 		*all = true;
1363 	} else {
1364 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1365 		if (ret < 0)
1366 			return ret;
1367 	}
1368 	if (cmd_buf)
1369 		return -EINVAL;
1370 	return ret;
1371 }
1372 
1373 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1374 				     const char __user *buffer,
1375 				     size_t count, loff_t *ppos, int ctype)
1376 {
1377 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1378 					"aura" : "pool";
1379 	struct seq_file *seqfp = filp->private_data;
1380 	struct rvu *rvu = seqfp->private;
1381 	int npalf, id = 0, ret;
1382 	bool all = false;
1383 
1384 	if ((*ppos != 0) || !count)
1385 		return -EINVAL;
1386 
1387 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1388 	if (!cmd_buf)
1389 		return count;
1390 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1391 				   &npalf, &id, &all);
1392 	if (ret < 0) {
1393 		dev_info(rvu->dev,
1394 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1395 			 ctype_string, ctype_string);
1396 		goto done;
1397 	} else {
1398 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1399 	}
1400 done:
1401 	kfree(cmd_buf);
1402 	return ret ? ret : count;
1403 }
1404 
1405 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1406 					  const char __user *buffer,
1407 					  size_t count, loff_t *ppos)
1408 {
1409 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1410 				     NPA_AQ_CTYPE_AURA);
1411 }
1412 
1413 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1414 {
1415 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1416 }
1417 
1418 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1419 
1420 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1421 					  const char __user *buffer,
1422 					  size_t count, loff_t *ppos)
1423 {
1424 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1425 				     NPA_AQ_CTYPE_POOL);
1426 }
1427 
1428 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1429 {
1430 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1431 }
1432 
1433 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1434 
1435 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1436 			    int ctype, int transaction)
1437 {
1438 	u64 req, out_req, lat, cant_alloc;
1439 	struct nix_hw *nix_hw;
1440 	struct rvu *rvu;
1441 	int port;
1442 
1443 	if (blk_addr == BLKADDR_NDC_NPA0) {
1444 		rvu = s->private;
1445 	} else {
1446 		nix_hw = s->private;
1447 		rvu = nix_hw->rvu;
1448 	}
1449 
1450 	for (port = 0; port < NDC_MAX_PORT; port++) {
1451 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1452 						(port, ctype, transaction));
1453 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1454 						(port, ctype, transaction));
1455 		out_req = rvu_read64(rvu, blk_addr,
1456 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1457 				     (port, ctype, transaction));
1458 		cant_alloc = rvu_read64(rvu, blk_addr,
1459 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1460 					(port, transaction));
1461 		seq_printf(s, "\nPort:%d\n", port);
1462 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1463 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1464 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1465 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1466 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1467 	}
1468 }
1469 
1470 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1471 {
1472 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1473 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1474 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1475 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1476 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1477 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1478 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1479 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1480 	return 0;
1481 }
1482 
1483 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1484 {
1485 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1486 }
1487 
1488 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1489 
1490 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1491 {
1492 	struct nix_hw *nix_hw;
1493 	struct rvu *rvu;
1494 	int bank, max_bank;
1495 	u64 ndc_af_const;
1496 
1497 	if (blk_addr == BLKADDR_NDC_NPA0) {
1498 		rvu = s->private;
1499 	} else {
1500 		nix_hw = s->private;
1501 		rvu = nix_hw->rvu;
1502 	}
1503 
1504 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1505 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1506 	for (bank = 0; bank < max_bank; bank++) {
1507 		seq_printf(s, "BANK:%d\n", bank);
1508 		seq_printf(s, "\tHits:\t%lld\n",
1509 			   (u64)rvu_read64(rvu, blk_addr,
1510 			   NDC_AF_BANKX_HIT_PC(bank)));
1511 		seq_printf(s, "\tMiss:\t%lld\n",
1512 			   (u64)rvu_read64(rvu, blk_addr,
1513 			    NDC_AF_BANKX_MISS_PC(bank)));
1514 	}
1515 	return 0;
1516 }
1517 
1518 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1519 {
1520 	struct nix_hw *nix_hw = filp->private;
1521 	int blkaddr = 0;
1522 	int ndc_idx = 0;
1523 
1524 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1525 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1526 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1527 
1528 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1529 }
1530 
1531 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1532 
1533 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1534 {
1535 	struct nix_hw *nix_hw = filp->private;
1536 	int blkaddr = 0;
1537 	int ndc_idx = 0;
1538 
1539 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1540 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1541 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1542 
1543 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1544 }
1545 
1546 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1547 
1548 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1549 					     void *unused)
1550 {
1551 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1552 }
1553 
1554 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1555 
1556 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1557 						void *unused)
1558 {
1559 	struct nix_hw *nix_hw = filp->private;
1560 	int ndc_idx = NPA0_U;
1561 	int blkaddr = 0;
1562 
1563 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1564 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1565 
1566 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1567 }
1568 
1569 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1570 
1571 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1572 						void *unused)
1573 {
1574 	struct nix_hw *nix_hw = filp->private;
1575 	int ndc_idx = NPA0_U;
1576 	int blkaddr = 0;
1577 
1578 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1579 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1580 
1581 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1582 }
1583 
1584 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1585 
1586 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1587 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1588 {
1589 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1590 		   sq_ctx->ena, sq_ctx->qint_idx);
1591 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1592 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1593 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1594 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1595 
1596 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1597 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1598 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1599 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1600 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1601 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1602 
1603 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1604 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1605 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1606 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1607 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1608 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1609 
1610 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1611 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1612 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1613 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1614 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1615 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1616 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1617 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1618 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1619 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1620 
1621 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1622 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1623 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1624 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1625 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1626 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1627 		   sq_ctx->smenq_next_sqb);
1628 
1629 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1630 
1631 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1632 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1633 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1634 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1635 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1636 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1637 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1638 
1639 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1640 		   (u64)sq_ctx->scm_lso_rem);
1641 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1642 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1643 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1644 		   (u64)sq_ctx->dropped_octs);
1645 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1646 		   (u64)sq_ctx->dropped_pkts);
1647 }
1648 
1649 static void print_tm_tree(struct seq_file *m,
1650 			  struct nix_aq_enq_rsp *rsp, u64 sq)
1651 {
1652 	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1653 	struct nix_hw *nix_hw = m->private;
1654 	struct rvu *rvu = nix_hw->rvu;
1655 	u16 p1, p2, p3, p4, schq;
1656 	int blkaddr;
1657 	u64 cfg;
1658 
1659 	if (!sq_ctx->ena)
1660 		return;
1661 
1662 	blkaddr = nix_hw->blkaddr;
1663 	schq = sq_ctx->smq;
1664 
1665 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
1666 	p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
1667 
1668 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
1669 	p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
1670 
1671 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
1672 	p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
1673 
1674 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
1675 	p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
1676 	seq_printf(m,
1677 		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
1678 		   sq, schq, p1, p2, p3, p4);
1679 }
1680 
1681 /*dumps given tm_tree registers*/
1682 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1683 {
1684 	int qidx, nixlf, rc, id, max_id = 0;
1685 	struct nix_hw *nix_hw = m->private;
1686 	struct rvu *rvu = nix_hw->rvu;
1687 	struct nix_aq_enq_req aq_req;
1688 	struct nix_aq_enq_rsp rsp;
1689 	struct rvu_pfvf *pfvf;
1690 	u16 pcifunc;
1691 
1692 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1693 	id = rvu->rvu_dbg.nix_tm_ctx.id;
1694 
1695 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1696 		return -EINVAL;
1697 
1698 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1699 	max_id = pfvf->sq_ctx->qsize;
1700 
1701 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1702 	aq_req.hdr.pcifunc = pcifunc;
1703 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1704 	aq_req.op = NIX_AQ_INSTOP_READ;
1705 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1706 	for (qidx = id; qidx < max_id; qidx++) {
1707 		aq_req.qidx = qidx;
1708 
1709 		/* Skip SQ's if not initialized */
1710 		if (!test_bit(qidx, pfvf->sq_bmap))
1711 			continue;
1712 
1713 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1714 
1715 		if (rc) {
1716 			seq_printf(m, "Failed to read SQ(%d) context\n",
1717 				   aq_req.qidx);
1718 			continue;
1719 		}
1720 		print_tm_tree(m, &rsp, aq_req.qidx);
1721 	}
1722 	return 0;
1723 }
1724 
1725 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1726 					 const char __user *buffer,
1727 					 size_t count, loff_t *ppos)
1728 {
1729 	struct seq_file *m = filp->private_data;
1730 	struct nix_hw *nix_hw = m->private;
1731 	struct rvu *rvu = nix_hw->rvu;
1732 	struct rvu_pfvf *pfvf;
1733 	u16 pcifunc;
1734 	u64 nixlf;
1735 	int ret;
1736 
1737 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1738 	if (ret)
1739 		return ret;
1740 
1741 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1742 		return -EINVAL;
1743 
1744 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1745 	if (!pfvf->sq_ctx) {
1746 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1747 		return -EINVAL;
1748 	}
1749 
1750 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1751 	return count;
1752 }
1753 
1754 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1755 
1756 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1757 {
1758 	struct nix_hw *nix_hw = m->private;
1759 	struct rvu *rvu = nix_hw->rvu;
1760 	int blkaddr, link, link_level;
1761 	struct rvu_hwinfo *hw;
1762 
1763 	hw = rvu->hw;
1764 	blkaddr = nix_hw->blkaddr;
1765 	if (lvl == NIX_TXSCH_LVL_MDQ) {
1766 		seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1767 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1768 		seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1769 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1770 		seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1771 			   rvu_read64(rvu, blkaddr,
1772 				      NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1773 		seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1774 			   rvu_read64(rvu, blkaddr,
1775 				      NIX_AF_MDQX_SCHEDULE(schq)));
1776 		seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1777 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1778 		seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1779 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1780 		seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1781 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1782 		seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1783 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1784 		seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1785 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1786 		seq_puts(m, "\n");
1787 	}
1788 
1789 	if (lvl == NIX_TXSCH_LVL_TL4) {
1790 		seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1791 			   rvu_read64(rvu, blkaddr,
1792 				      NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1793 		seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1794 			   rvu_read64(rvu, blkaddr,
1795 				      NIX_AF_TL4X_SCHEDULE(schq)));
1796 		seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1797 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1798 		seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1799 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1800 		seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1801 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1802 		seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1803 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1804 		seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1805 			   rvu_read64(rvu, blkaddr,
1806 				      NIX_AF_TL4X_TOPOLOGY(schq)));
1807 		seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1808 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1809 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1810 			   rvu_read64(rvu, blkaddr,
1811 				      NIX_AF_TL4X_MD_DEBUG0(schq)));
1812 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1813 			   rvu_read64(rvu, blkaddr,
1814 				      NIX_AF_TL4X_MD_DEBUG1(schq)));
1815 		seq_puts(m, "\n");
1816 	}
1817 
1818 	if (lvl == NIX_TXSCH_LVL_TL3) {
1819 		seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1820 			   rvu_read64(rvu, blkaddr,
1821 				      NIX_AF_TL3X_SCHEDULE(schq)));
1822 		seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1823 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1824 		seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1825 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1826 		seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1827 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1828 		seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1829 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1830 		seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1831 			   rvu_read64(rvu, blkaddr,
1832 				      NIX_AF_TL3X_TOPOLOGY(schq)));
1833 		seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1834 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1835 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1836 			   rvu_read64(rvu, blkaddr,
1837 				      NIX_AF_TL3X_MD_DEBUG0(schq)));
1838 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1839 			   rvu_read64(rvu, blkaddr,
1840 				      NIX_AF_TL3X_MD_DEBUG1(schq)));
1841 
1842 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1843 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1844 		if (lvl == link_level) {
1845 			seq_printf(m,
1846 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1847 				   schq, rvu_read64(rvu, blkaddr,
1848 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1849 			for (link = 0; link < hw->cgx_links; link++)
1850 				seq_printf(m,
1851 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1852 					   schq, link,
1853 					   rvu_read64(rvu, blkaddr,
1854 						      NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1855 		}
1856 		seq_puts(m, "\n");
1857 	}
1858 
1859 	if (lvl == NIX_TXSCH_LVL_TL2) {
1860 		seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1861 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1862 		seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1863 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1864 		seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1865 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1866 		seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1867 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1868 		seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1869 			   rvu_read64(rvu, blkaddr,
1870 				      NIX_AF_TL2X_TOPOLOGY(schq)));
1871 		seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1872 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1873 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1874 			   rvu_read64(rvu, blkaddr,
1875 				      NIX_AF_TL2X_MD_DEBUG0(schq)));
1876 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1877 			   rvu_read64(rvu, blkaddr,
1878 				      NIX_AF_TL2X_MD_DEBUG1(schq)));
1879 
1880 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1881 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1882 		if (lvl == link_level) {
1883 			seq_printf(m,
1884 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1885 				   schq, rvu_read64(rvu, blkaddr,
1886 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1887 			for (link = 0; link < hw->cgx_links; link++)
1888 				seq_printf(m,
1889 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1890 					   schq, link, rvu_read64(rvu, blkaddr,
1891 					   NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1892 		}
1893 		seq_puts(m, "\n");
1894 	}
1895 
1896 	if (lvl == NIX_TXSCH_LVL_TL1) {
1897 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1898 			   schq,
1899 			   rvu_read64(rvu, blkaddr,
1900 				      NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1901 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1902 			   rvu_read64(rvu, blkaddr,
1903 				      NIX_AF_TX_LINKX_HW_XOFF(schq)));
1904 		seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1905 			   rvu_read64(rvu, blkaddr,
1906 				      NIX_AF_TL1X_SCHEDULE(schq)));
1907 		seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1908 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1909 		seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1910 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1911 		seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1912 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1913 		seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1914 			   rvu_read64(rvu, blkaddr,
1915 				      NIX_AF_TL1X_TOPOLOGY(schq)));
1916 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1917 			   rvu_read64(rvu, blkaddr,
1918 				      NIX_AF_TL1X_MD_DEBUG0(schq)));
1919 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1920 			   rvu_read64(rvu, blkaddr,
1921 				      NIX_AF_TL1X_MD_DEBUG1(schq)));
1922 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1923 			   schq,
1924 			   rvu_read64(rvu, blkaddr,
1925 				      NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1926 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1927 			   rvu_read64(rvu, blkaddr,
1928 				      NIX_AF_TL1X_DROPPED_BYTES(schq)));
1929 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1930 			   rvu_read64(rvu, blkaddr,
1931 				      NIX_AF_TL1X_RED_PACKETS(schq)));
1932 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1933 			   rvu_read64(rvu, blkaddr,
1934 				      NIX_AF_TL1X_RED_BYTES(schq)));
1935 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1936 			   rvu_read64(rvu, blkaddr,
1937 				      NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1938 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1939 			   rvu_read64(rvu, blkaddr,
1940 				      NIX_AF_TL1X_YELLOW_BYTES(schq)));
1941 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1942 			   rvu_read64(rvu, blkaddr,
1943 				      NIX_AF_TL1X_GREEN_PACKETS(schq)));
1944 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1945 			   rvu_read64(rvu, blkaddr,
1946 				      NIX_AF_TL1X_GREEN_BYTES(schq)));
1947 		seq_puts(m, "\n");
1948 	}
1949 }
1950 
/* Dumps the TM topology registers of the debugfs-selected NIX LF */
1952 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1953 {
1954 	struct nix_hw *nix_hw = m->private;
1955 	struct rvu *rvu = nix_hw->rvu;
1956 	struct nix_aq_enq_req aq_req;
1957 	struct nix_txsch *txsch;
1958 	int nixlf, lvl, schq;
1959 	u16 pcifunc;
1960 
1961 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1962 
1963 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1964 		return -EINVAL;
1965 
1966 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1967 	aq_req.hdr.pcifunc = pcifunc;
1968 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1969 	aq_req.op = NIX_AQ_INSTOP_READ;
1970 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1971 
1972 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1973 		txsch = &nix_hw->txsch[lvl];
1974 		for (schq = 0; schq < txsch->schq.max; schq++) {
1975 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1976 				print_tm_topo(m, schq, lvl);
1977 		}
1978 	}
1979 	return 0;
1980 }
1981 
1982 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1983 					 const char __user *buffer,
1984 					 size_t count, loff_t *ppos)
1985 {
1986 	struct seq_file *m = filp->private_data;
1987 	struct nix_hw *nix_hw = m->private;
1988 	struct rvu *rvu = nix_hw->rvu;
1989 	struct rvu_pfvf *pfvf;
1990 	u16 pcifunc;
1991 	u64 nixlf;
1992 	int ret;
1993 
1994 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1995 	if (ret)
1996 		return ret;
1997 
1998 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1999 		return -EINVAL;
2000 
2001 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2002 	if (!pfvf->sq_ctx) {
2003 		dev_warn(rvu->dev, "SQ context is not initialized\n");
2004 		return -EINVAL;
2005 	}
2006 
2007 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
2008 	return count;
2009 }
2010 
2011 RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
2012 
/* Dumps the given NIX SQ context word by word (W0..W15).
 *
 * Dispatches to the silicon-specific pretty-printer when the context
 * layout differs (CN20K, then CN10K); the fall-through path decodes
 * the OcteonTX2 layout.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN20K uses a different SQ context layout */
	if (is_cn20k(rvu->pdev)) {
		print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx);
		return;
	}

	/* Any other non-OcteonTX2 silicon uses the CN10K layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8 are SQB pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15 are statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2088 
2089 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2090 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
2091 {
2092 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2093 		   rq_ctx->ena, rq_ctx->sso_ena);
2094 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2095 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2096 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2097 		   rq_ctx->cq, rq_ctx->lenerr_dis);
2098 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2099 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2100 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2101 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2102 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2103 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2104 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2105 
2106 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2107 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
2108 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2109 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2110 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
2111 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2112 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
2113 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2114 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2115 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2116 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2117 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2118 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2119 
2120 	seq_printf(m, "W2: band_prof_id \t\t%d\n",
2121 		   (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id);
2122 
2123 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2124 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2125 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2126 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
2127 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2128 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2129 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2130 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2131 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2132 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2133 
2134 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2135 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2136 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2137 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2138 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2139 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2140 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2141 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2142 
2143 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2144 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2145 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2146 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2147 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2148 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
2149 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2150 
2151 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2152 		   rq_ctx->ltag, rq_ctx->good_utag);
2153 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2154 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
2155 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2156 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2157 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2158 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2159 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2160 
2161 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2162 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2163 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2164 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2165 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2166 }
2167 
/* Dumps the given NIX RQ context word by word (W0..W10).
 *
 * Non-OcteonTX2 silicon uses the CN10K layout and is handled by the
 * dedicated printer; the fall-through path decodes the OcteonTX2
 * layout.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K (and newer non-OcteonTX2 parts) use a different layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10 are statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2236 
/* Dumps the given NIX CQ context word by word (W0..W3).
 *
 * CN20K has its own layout and printer; for other silicon the common
 * fields are printed, with the extra LBP-related fields emitted only
 * on non-OcteonTX2 parts.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN20K uses a different CQ context layout */
	if (is_cn20k(rvu->pdev)) {
		print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* LBP fields only exist on CN10K-class silicon */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		/* lbpid is split across three bit-fields; recombine it */
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);

	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2290 
2291 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
2292 					 void *unused, int ctype)
2293 {
2294 	void (*print_nix_ctx)(struct seq_file *filp,
2295 			      struct nix_aq_enq_rsp *rsp) = NULL;
2296 	struct nix_hw *nix_hw = filp->private;
2297 	struct rvu *rvu = nix_hw->rvu;
2298 	struct nix_aq_enq_req aq_req;
2299 	struct nix_aq_enq_rsp rsp;
2300 	char *ctype_string = NULL;
2301 	int qidx, rc, max_id = 0;
2302 	struct rvu_pfvf *pfvf;
2303 	int nixlf, id, all;
2304 	u16 pcifunc;
2305 
2306 	switch (ctype) {
2307 	case NIX_AQ_CTYPE_CQ:
2308 		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
2309 		id = rvu->rvu_dbg.nix_cq_ctx.id;
2310 		all = rvu->rvu_dbg.nix_cq_ctx.all;
2311 		break;
2312 
2313 	case NIX_AQ_CTYPE_SQ:
2314 		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
2315 		id = rvu->rvu_dbg.nix_sq_ctx.id;
2316 		all = rvu->rvu_dbg.nix_sq_ctx.all;
2317 		break;
2318 
2319 	case NIX_AQ_CTYPE_RQ:
2320 		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
2321 		id = rvu->rvu_dbg.nix_rq_ctx.id;
2322 		all = rvu->rvu_dbg.nix_rq_ctx.all;
2323 		break;
2324 
2325 	default:
2326 		return -EINVAL;
2327 	}
2328 
2329 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2330 		return -EINVAL;
2331 
2332 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2333 	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
2334 		seq_puts(filp, "SQ context is not initialized\n");
2335 		return -EINVAL;
2336 	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
2337 		seq_puts(filp, "RQ context is not initialized\n");
2338 		return -EINVAL;
2339 	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
2340 		seq_puts(filp, "CQ context is not initialized\n");
2341 		return -EINVAL;
2342 	}
2343 
2344 	if (ctype == NIX_AQ_CTYPE_SQ) {
2345 		max_id = pfvf->sq_ctx->qsize;
2346 		ctype_string = "sq";
2347 		print_nix_ctx = print_nix_sq_ctx;
2348 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
2349 		max_id = pfvf->rq_ctx->qsize;
2350 		ctype_string = "rq";
2351 		print_nix_ctx = print_nix_rq_ctx;
2352 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
2353 		max_id = pfvf->cq_ctx->qsize;
2354 		ctype_string = "cq";
2355 		print_nix_ctx = print_nix_cq_ctx;
2356 	}
2357 
2358 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
2359 	aq_req.hdr.pcifunc = pcifunc;
2360 	aq_req.ctype = ctype;
2361 	aq_req.op = NIX_AQ_INSTOP_READ;
2362 	if (all)
2363 		id = 0;
2364 	else
2365 		max_id = id + 1;
2366 	for (qidx = id; qidx < max_id; qidx++) {
2367 		aq_req.qidx = qidx;
2368 		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
2369 			   ctype_string, nixlf, aq_req.qidx);
2370 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
2371 		if (rc) {
2372 			seq_puts(filp, "Failed to read the context\n");
2373 			return -EINVAL;
2374 		}
2375 		print_nix_ctx(filp, &rsp);
2376 	}
2377 	return 0;
2378 }
2379 
2380 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
2381 			       int id, int ctype, char *ctype_string,
2382 			       struct seq_file *m)
2383 {
2384 	struct nix_hw *nix_hw = m->private;
2385 	struct rvu_pfvf *pfvf;
2386 	int max_id = 0;
2387 	u16 pcifunc;
2388 
2389 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2390 		return -EINVAL;
2391 
2392 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2393 
2394 	if (ctype == NIX_AQ_CTYPE_SQ) {
2395 		if (!pfvf->sq_ctx) {
2396 			dev_warn(rvu->dev, "SQ context is not initialized\n");
2397 			return -EINVAL;
2398 		}
2399 		max_id = pfvf->sq_ctx->qsize;
2400 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
2401 		if (!pfvf->rq_ctx) {
2402 			dev_warn(rvu->dev, "RQ context is not initialized\n");
2403 			return -EINVAL;
2404 		}
2405 		max_id = pfvf->rq_ctx->qsize;
2406 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
2407 		if (!pfvf->cq_ctx) {
2408 			dev_warn(rvu->dev, "CQ context is not initialized\n");
2409 			return -EINVAL;
2410 		}
2411 		max_id = pfvf->cq_ctx->qsize;
2412 	}
2413 
2414 	if (id < 0 || id >= max_id) {
2415 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
2416 			 ctype_string, max_id - 1);
2417 		return -EINVAL;
2418 	}
2419 	switch (ctype) {
2420 	case NIX_AQ_CTYPE_CQ:
2421 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
2422 		rvu->rvu_dbg.nix_cq_ctx.id = id;
2423 		rvu->rvu_dbg.nix_cq_ctx.all = all;
2424 		break;
2425 
2426 	case NIX_AQ_CTYPE_SQ:
2427 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
2428 		rvu->rvu_dbg.nix_sq_ctx.id = id;
2429 		rvu->rvu_dbg.nix_sq_ctx.all = all;
2430 		break;
2431 
2432 	case NIX_AQ_CTYPE_RQ:
2433 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
2434 		rvu->rvu_dbg.nix_rq_ctx.id = id;
2435 		rvu->rvu_dbg.nix_rq_ctx.all = all;
2436 		break;
2437 	default:
2438 		return -EINVAL;
2439 	}
2440 	return 0;
2441 }
2442 
2443 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2444 					   const char __user *buffer,
2445 					   size_t count, loff_t *ppos,
2446 					   int ctype)
2447 {
2448 	struct seq_file *m = filp->private_data;
2449 	struct nix_hw *nix_hw = m->private;
2450 	struct rvu *rvu = nix_hw->rvu;
2451 	char *cmd_buf, *ctype_string;
2452 	int nixlf, id = 0, ret;
2453 	bool all = false;
2454 
2455 	if ((*ppos != 0) || !count)
2456 		return -EINVAL;
2457 
2458 	switch (ctype) {
2459 	case NIX_AQ_CTYPE_SQ:
2460 		ctype_string = "sq";
2461 		break;
2462 	case NIX_AQ_CTYPE_RQ:
2463 		ctype_string = "rq";
2464 		break;
2465 	case NIX_AQ_CTYPE_CQ:
2466 		ctype_string = "cq";
2467 		break;
2468 	default:
2469 		return -EINVAL;
2470 	}
2471 
2472 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2473 
2474 	if (!cmd_buf)
2475 		return count;
2476 
2477 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2478 				   &nixlf, &id, &all);
2479 	if (ret < 0) {
2480 		dev_info(rvu->dev,
2481 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2482 			 ctype_string, ctype_string);
2483 		goto done;
2484 	} else {
2485 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2486 					  ctype_string, m);
2487 	}
2488 done:
2489 	kfree(cmd_buf);
2490 	return ret ? ret : count;
2491 }
2492 
2493 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
2494 					const char __user *buffer,
2495 					size_t count, loff_t *ppos)
2496 {
2497 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2498 					    NIX_AQ_CTYPE_SQ);
2499 }
2500 
2501 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
2502 {
2503 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
2504 }
2505 
2506 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2507 
2508 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
2509 					const char __user *buffer,
2510 					size_t count, loff_t *ppos)
2511 {
2512 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2513 					    NIX_AQ_CTYPE_RQ);
2514 }
2515 
2516 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
2517 {
2518 	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
2519 }
2520 
2521 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2522 
2523 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
2524 					const char __user *buffer,
2525 					size_t count, loff_t *ppos)
2526 {
2527 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2528 					    NIX_AQ_CTYPE_CQ);
2529 }
2530 
2531 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
2532 {
2533 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
2534 }
2535 
2536 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2537 
/* Prints the context count and the enable/disable bitmap for one
 * queue type ("cq", "rq" or "sq").
 */
static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
				 unsigned long *bmap, char *qtype)
{
	seq_printf(filp,
		   "%s context count : %d\n%s context ena/dis bitmap : %*pb\n",
		   qtype, qsize, qtype, qsize, bmap);
}
2544 
2545 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2546 {
2547 	if (!pfvf->cq_ctx)
2548 		seq_puts(filp, "cq context is not initialized\n");
2549 	else
2550 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2551 				     "cq");
2552 
2553 	if (!pfvf->rq_ctx)
2554 		seq_puts(filp, "rq context is not initialized\n");
2555 	else
2556 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2557 				     "rq");
2558 
2559 	if (!pfvf->sq_ctx)
2560 		seq_puts(filp, "sq context is not initialized\n");
2561 	else
2562 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2563 				     "sq");
2564 }
2565 
2566 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
2567 				       const char __user *buffer,
2568 				       size_t count, loff_t *ppos)
2569 {
2570 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
2571 				   BLKTYPE_NIX);
2572 }
2573 
2574 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
2575 {
2576 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
2577 }
2578 
2579 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2580 
2581 static void print_band_prof_ctx(struct seq_file *m,
2582 				struct nix_bandprof_s *prof)
2583 {
2584 	char *str;
2585 
2586 	switch (prof->pc_mode) {
2587 	case NIX_RX_PC_MODE_VLAN:
2588 		str = "VLAN";
2589 		break;
2590 	case NIX_RX_PC_MODE_DSCP:
2591 		str = "DSCP";
2592 		break;
2593 	case NIX_RX_PC_MODE_GEN:
2594 		str = "Generic";
2595 		break;
2596 	case NIX_RX_PC_MODE_RSVD:
2597 		str = "Reserved";
2598 		break;
2599 	}
2600 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2601 	str = (prof->icolor == 3) ? "Color blind" :
2602 		(prof->icolor == 0) ? "Green" :
2603 		(prof->icolor == 1) ? "Yellow" : "Red";
2604 	seq_printf(m, "W0: icolor\t\t%s\n", str);
2605 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2606 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2607 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2608 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2609 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2610 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2611 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2612 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2613 
2614 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2615 	str = (prof->lmode == 0) ? "byte" : "packet";
2616 	seq_printf(m, "W1: lmode\t\t%s\n", str);
2617 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2618 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2619 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2620 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2621 	str = (prof->gc_action == 0) ? "PASS" :
2622 		(prof->gc_action == 1) ? "DROP" : "RED";
2623 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
2624 	str = (prof->yc_action == 0) ? "PASS" :
2625 		(prof->yc_action == 1) ? "DROP" : "RED";
2626 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
2627 	str = (prof->rc_action == 0) ? "PASS" :
2628 		(prof->rc_action == 1) ? "DROP" : "RED";
2629 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
2630 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2631 
2632 	seq_printf(m, "W1: band_prof_id\t%d\n",
2633 		   (u16)prof->band_prof_id_h << 7 | prof->band_prof_id);
2634 
2635 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2636 
2637 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2638 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2639 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2640 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2641 		   (u64)prof->green_pkt_pass);
2642 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2643 		   (u64)prof->yellow_pkt_pass);
2644 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2645 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
2646 		   (u64)prof->green_octs_pass);
2647 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2648 		   (u64)prof->yellow_octs_pass);
2649 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2650 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2651 		   (u64)prof->green_pkt_drop);
2652 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2653 		   (u64)prof->yellow_pkt_drop);
2654 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2655 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
2656 		   (u64)prof->green_octs_drop);
2657 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2658 		   (u64)prof->yellow_octs_drop);
2659 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2660 	seq_puts(m, "==============================\n");
2661 }
2662 
/* Dump every allocated bandwidth-profile context across all policer
 * layers (Leaf/Mid/Top), fetching each context through the NIX AQ.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are currently allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ index encodes the layer in bits above bit 14 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			/* Report the PF/VF the profile is allocated to */
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2719 
2720 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2721 {
2722 	struct nix_hw *nix_hw = m->private;
2723 	struct nix_ipolicer *ipolicer;
2724 	int layer;
2725 	char *str;
2726 
2727 	/* Ingress policers do not exist on all platforms */
2728 	if (!nix_hw->ipolicer)
2729 		return 0;
2730 
2731 	seq_puts(m, "\nBandwidth profile resource free count\n");
2732 	seq_puts(m, "=====================================\n");
2733 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2734 		if (layer == BAND_PROF_INVAL_LAYER)
2735 			continue;
2736 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2737 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2738 
2739 		ipolicer = &nix_hw->ipolicer[layer];
2740 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2741 			   ipolicer->band_prof.max,
2742 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2743 	}
2744 	seq_puts(m, "=====================================\n");
2745 
2746 	return 0;
2747 }
2748 
2749 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2750 
2751 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2752 {
2753 	struct nix_hw *nix_hw;
2754 
2755 	if (!is_block_implemented(rvu->hw, blkaddr))
2756 		return;
2757 
2758 	if (blkaddr == BLKADDR_NIX0) {
2759 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2760 		nix_hw = &rvu->hw->nix[0];
2761 	} else {
2762 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2763 						      rvu->rvu_dbg.root);
2764 		nix_hw = &rvu->hw->nix[1];
2765 	}
2766 
2767 	debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
2768 			    &rvu_dbg_nix_tm_tree_fops);
2769 	debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
2770 			    &rvu_dbg_nix_tm_topo_fops);
2771 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2772 			    &rvu_dbg_nix_sq_ctx_fops);
2773 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2774 			    &rvu_dbg_nix_rq_ctx_fops);
2775 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2776 			    &rvu_dbg_nix_cq_ctx_fops);
2777 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2778 			    &rvu_dbg_nix_ndc_tx_cache_fops);
2779 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2780 			    &rvu_dbg_nix_ndc_rx_cache_fops);
2781 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2782 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2783 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2784 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2785 	debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2786 			    blkaddr, &rvu_dbg_nix_qsize_fops);
2787 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2788 			    &rvu_dbg_nix_band_prof_ctx_fops);
2789 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2790 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2791 }
2792 
2793 static void rvu_dbg_npa_init(struct rvu *rvu)
2794 {
2795 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2796 
2797 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2798 			    &rvu_dbg_npa_qsize_fops);
2799 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2800 			    &rvu_dbg_npa_aura_ctx_fops);
2801 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2802 			    &rvu_dbg_npa_pool_ctx_fops);
2803 
2804 	if (is_cn20k(rvu->pdev)) /* NDC not appliable for cn20k */
2805 		return;
2806 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2807 			    &rvu_dbg_npa_ndc_cache_fops);
2808 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2809 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2810 }
2811 
/* Read one cumulative NIX RX stat for this LMAC, print it if the read
 * succeeded, set 'err' in the caller's scope, and evaluate to the
 * counter value.  cnt is zero-initialized so the statement expression
 * never yields an indeterminate value on the error path.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt = 0;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* TX-side counterpart of the RX macro above */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt = 0;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2831 
2832 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2833 {
2834 	struct cgx_link_user_info linfo;
2835 	struct mac_ops *mac_ops;
2836 	void *cgxd = s->private;
2837 	u64 ucast, mcast, bcast;
2838 	int stat = 0, err = 0;
2839 	u64 tx_stat, rx_stat;
2840 	struct rvu *rvu;
2841 
2842 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2843 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2844 	if (!rvu)
2845 		return -ENODEV;
2846 
2847 	mac_ops = get_mac_ops(cgxd);
2848 	/* There can be no CGX devices at all */
2849 	if (!mac_ops)
2850 		return 0;
2851 
2852 	/* Link status */
2853 	seq_puts(s, "\n=======Link Status======\n\n");
2854 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2855 	if (err)
2856 		seq_puts(s, "Failed to read link status\n");
2857 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2858 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2859 
2860 	/* Rx stats */
2861 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2862 		   mac_ops->name);
2863 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2864 	if (err)
2865 		return err;
2866 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2867 	if (err)
2868 		return err;
2869 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2870 	if (err)
2871 		return err;
2872 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2873 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2874 	if (err)
2875 		return err;
2876 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2877 	if (err)
2878 		return err;
2879 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2880 	if (err)
2881 		return err;
2882 
2883 	/* Tx stats */
2884 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2885 		   mac_ops->name);
2886 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2887 	if (err)
2888 		return err;
2889 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2890 	if (err)
2891 		return err;
2892 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2893 	if (err)
2894 		return err;
2895 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2896 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2897 	if (err)
2898 		return err;
2899 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2900 	if (err)
2901 		return err;
2902 
2903 	/* Rx stats */
2904 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2905 	while (stat < mac_ops->rx_stats_cnt) {
2906 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2907 		if (err)
2908 			return err;
2909 		if (is_rvu_otx2(rvu))
2910 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2911 				   rx_stat);
2912 		else
2913 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2914 				   rx_stat);
2915 		stat++;
2916 	}
2917 
2918 	/* Tx stats */
2919 	stat = 0;
2920 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2921 	while (stat < mac_ops->tx_stats_cnt) {
2922 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2923 		if (err)
2924 			return err;
2925 
2926 		if (is_rvu_otx2(rvu))
2927 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2928 				   tx_stat);
2929 		else
2930 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2931 				   tx_stat);
2932 		stat++;
2933 	}
2934 
2935 	return err;
2936 }
2937 
/* The LMAC id for per-LMAC files is stored as the debugfs aux number */
static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
	return debugfs_get_aux_num(s->file);
}

/* debugfs "stats" show handler; s->private is the cgx device pointer */
static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
	return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2949 
/* Show the DMAC filter configuration and the enabled DMAC CAM entries
 * belonging to one CGX LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): PCI domain 2 with bus (pf + 1) appears to be the
	 * fixed enumeration of RVU PFs on this silicon - confirm against
	 * the platform's PCI topology.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}

/* debugfs "mac_filter" show handler */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
	return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
3009 
/* Dump the firmware-shared data for one CGX LMAC: supported and
 * advertised link capabilities, LMAC config, SFP EEPROM contents and
 * PHY information.
 */
static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
{
	struct cgx_lmac_fwdata_s *fwdata;
	void *cgxd = s->private;
	struct phy_s *phy;
	struct rvu *rvu;
	int cgx_id, i;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	/* Firmware data may not have been shared yet */
	if (!rvu->fwdata)
		return -EAGAIN;

	cgx_id = cgx_get_cgxid(cgxd);

	/* USX-capable silicon keeps its fwdata in a separate array */
	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		fwdata =  &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
	else
		fwdata =  &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];

	seq_puts(s, "\nFIRMWARE SHARED:\n");
	seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   fwdata->supported_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   (u64)fwdata->advertised_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
	seq_puts(s, "\t\t============\n");
	seq_printf(s, "\t\t rw_valid  \t\t :%x\n",  fwdata->rw_valid);
	seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
	seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
	seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
	seq_printf(s, "\t\t Link modes own \t :%llx\n",
		   (u64)fwdata->advertised_link_modes_own);
	seq_puts(s, "\n");

	/* EEPROM bytes, hex, 16 per line */
	seq_puts(s, "\n\t\tEEPROM DATA\n");
	seq_puts(s, "\t\t===========\n");
	seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
	seq_puts(s, "\t\t data \t\t\t :\n");
	seq_puts(s, "\t\t");
	for (i = 0; i < SFP_EEPROM_SIZE; i++) {
		seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
		if ((i + 1) % 16 == 0) {
			seq_puts(s, "\n");
			seq_puts(s, "\t\t");
		}
	}
	seq_puts(s, "\n");

	phy = &fwdata->phy;
	seq_puts(s, "\n\t\tPHY INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
		   phy->misc.can_change_mod_type);
	seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
	seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
	seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
		   phy->fec_stats.rsfec_corr_cws);
	seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
		   phy->fec_stats.rsfec_uncorr_cws);
	seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
		   phy->fec_stats.brfec_corr_blks);
	seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
		   phy->fec_stats.brfec_uncorr_blks);
	seq_puts(s, "\n");

	return 0;
}

/* debugfs "fwdata" show handler */
static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
{
	return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
3100 
3101 static void rvu_dbg_cgx_init(struct rvu *rvu)
3102 {
3103 	struct mac_ops *mac_ops;
3104 	unsigned long lmac_bmap;
3105 	int i, lmac_id;
3106 	char dname[20];
3107 	void *cgx;
3108 
3109 	if (!cgx_get_cgxcnt_max())
3110 		return;
3111 
3112 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
3113 	if (!mac_ops)
3114 		return;
3115 
3116 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
3117 						   rvu->rvu_dbg.root);
3118 
3119 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
3120 		cgx = rvu_cgx_pdata(i, rvu);
3121 		if (!cgx)
3122 			continue;
3123 		lmac_bmap = cgx_get_lmac_bmap(cgx);
3124 		/* cgx debugfs dir */
3125 		sprintf(dname, "%s%d", mac_ops->name, i);
3126 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
3127 						      rvu->rvu_dbg.cgx_root);
3128 
3129 		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
3130 			/* lmac debugfs dir */
3131 			sprintf(dname, "lmac%d", lmac_id);
3132 			rvu->rvu_dbg.lmac =
3133 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
3134 
3135 			debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
3136 					    cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
3137 			debugfs_create_file_aux_num("mac_filter", 0600,
3138 					    rvu->rvu_dbg.lmac, cgx, lmac_id,
3139 					    &rvu_dbg_cgx_dmac_flt_fops);
3140 			debugfs_create_file("fwdata", 0600,
3141 					    rvu->rvu_dbg.lmac, cgx,
3142 					    &rvu_dbg_cgx_fwdata_fops);
3143 		}
3144 	}
3145 }
3146 
3147 /* NPC debugfs APIs */
3148 static void rvu_print_npc_mcam_info(struct seq_file *s,
3149 				    u16 pcifunc, int blkaddr)
3150 {
3151 	struct rvu *rvu = s->private;
3152 	int entry_acnt, entry_ecnt;
3153 	int cntr_acnt, cntr_ecnt;
3154 
3155 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
3156 					  &entry_acnt, &entry_ecnt);
3157 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
3158 					    &cntr_acnt, &cntr_ecnt);
3159 	if (!entry_acnt && !cntr_acnt)
3160 		return;
3161 
3162 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
3163 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
3164 			   rvu_get_pf(rvu->pdev, pcifunc));
3165 	else
3166 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
3167 			   rvu_get_pf(rvu->pdev, pcifunc),
3168 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
3169 
3170 	if (entry_acnt) {
3171 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
3172 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
3173 	}
3174 	if (cntr_acnt) {
3175 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
3176 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
3177 	}
3178 }
3179 
/* Summarize NPC MCAM state: RX/TX key widths, total/reserved/free
 * entry and counter counts, and the per-PF/VF allocation breakdown.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int x4_free, x2_free, sb_free;
	int pf, vf, numvfs, blkaddr;
	struct npc_priv_t *npc_priv;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	if (is_cn20k(rvu->pdev)) {
		npc_priv = npc_priv_get();
		seq_printf(filp, "\t\t RX keywidth \t: %s\n",
			   (npc_priv->kw == NPC_MCAM_KEY_X1) ?
			   "256bits" : "512bits");

		npc_cn20k_subbank_calc_free(rvu, &x2_free, &x4_free, &sb_free);
		seq_printf(filp, "\t\t free x4 slots\t: %d\n", x4_free);

		seq_printf(filp, "\t\t free x2 slots\t: %d\n", x2_free);

		seq_printf(filp, "\t\t free subbanks\t: %d\n", sb_free);
	} else {
		/* Key width is encoded in bits [34:32] of the KEX config */
		cfg = rvu_read64(rvu, blkaddr,
				 NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
		cfg = (cfg >> 32) & 0x07;
		seq_printf(filp, "\t\t RX keywidth \t: %s\n",
			   (cfg == NPC_MCAM_KEY_X1) ?
			   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
					"224bits" : "448bits"));
		cfg = rvu_read64(rvu, blkaddr,
				 NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
		cfg = (cfg >> 32) & 0x07;
		seq_printf(filp, "\t\t TX keywidth \t: %s\n",
			   (cfg == NPC_MCAM_KEY_X1) ?
			   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
					"224bits" : "448bits"));
	}

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* All bitmap entries free - skip the per-PF/VF breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs lives in bits [19:12] of the PF config */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3266 
3267 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
3268 					     void *unused)
3269 {
3270 	struct rvu *rvu = filp->private;
3271 	struct npc_mcam *mcam;
3272 	int blkaddr;
3273 
3274 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3275 	if (blkaddr < 0)
3276 		return -ENODEV;
3277 
3278 	mcam = &rvu->hw->mcam;
3279 
3280 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
3281 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
3282 		   rvu_read64(rvu, blkaddr,
3283 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
3284 
3285 	return 0;
3286 }
3287 
3288 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3289 
/* Print an MPLS LSE TTL field's value and mask.  The stray line
 * continuation after "while (0)" was dropped: it silently spliced the
 * following line into the macro.
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
do {									      \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)

/* Print MPLS LSE label/TC/BOS values and masks; arguments are bound to
 * locals so each is evaluated only once.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld %ld %ld\n",                                        \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
} while (0)
3310 
/* Print each match field of an MCAM rule along with its mask.  The
 * rule's 'features' bitmap selects which fields are active.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		/* L4 ports share one field for TCP/UDP/SCTP */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_TCP_FLAGS:
			seq_printf(s, "%d ", rule->packet.tcp_flags);
			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		case NPC_MPLS1_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
						   rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS1_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
					       rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS2_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
						   rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS2_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
					       rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS3_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
						   rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS3_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
					       rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS4_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
						   rule->mask.mpls_lse[3]);
			break;
		case NPC_MPLS4_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
					       rule->mask.mpls_lse[3]);
			break;
		case NPC_TYPE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_type);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
			break;
		case NPC_CODE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_code);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
			break;
		default:
			seq_puts(s, "\n");
			break;
		}
	}
}
3441 
/* Print the forwarding action programmed in an MCAM rule, using the TX
 * or RX action encoding depending on the rule's interface.  Unknown
 * opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
3489 
/* Map an NPC interface id to a printable name */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
3507 
/* List every installed MCAM rule: owner PF/VF, direction/interface,
 * matched fields, forward target, action, enable state and hit count.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1, bank;
	u16 target, index;
	bool enabled;
	u64 hits, off;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = rvu_get_pf(rvu->pdev, iter->owner);
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forward target and channel match */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = rvu_get_pf(rvu->pdev, target);
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
		/* cn20k reads hits from a per-entry extended stat register
		 * instead of a separately allocated match counter.
		 */
		if (is_cn20k(rvu->pdev)) {
			seq_printf(s, "\tpriority: %u\n", iter->hw_prio);
			index = iter->entry & (mcam->banksize - 1);
			bank = npc_get_bank(mcam, iter->entry);
			off = NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(index, bank);
			hits = rvu_read64(rvu, blkaddr, off);
			seq_printf(s, "\thits: %lld\n", hits);
			continue;
		}

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3584 
/* debugfs "npc/exact_entries": dump the exact-match MEM table (one column
 * per hash way, rows ordered by table index) followed by the overflow CAM
 * table. Each printed cell is "chan MAC"; empty ways print "nil".
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	/* Per-row bitmask of ways that hold an entry at the current index */
	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	/* mem_entry[i] is a per-way cursor into that way's sorted entry list */
	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		bitmap = 0;
		/* Mark the ways whose next entry sits at row i */
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor past the printed entry.
			 * NOTE(review): assumes each per-way list holds at
			 * least one more node or is never read again once
			 * its last index is printed — relies on the lists
			 * being index-sorted.
			 */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}
3669 
3670 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3671 
3672 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3673 {
3674 	struct npc_exact_table *table;
3675 	struct rvu *rvu = s->private;
3676 	int i;
3677 
3678 	table = rvu->hw->table;
3679 
3680 	seq_puts(s, "\n\tExact Table Info\n");
3681 	seq_printf(s, "Exact Match Feature : %s\n",
3682 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3683 	if (!rvu->hw->cap.npc_exact_match_enabled)
3684 		return 0;
3685 
3686 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3687 	for (i = 0; i < table->num_drop_rules; i++)
3688 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3689 
3690 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3691 	for (i = 0; i < table->num_drop_rules; i++)
3692 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3693 
3694 	seq_puts(s, "\n\tMEM Table Info\n");
3695 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3696 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3697 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3698 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3699 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3700 
3701 	seq_puts(s, "\n\tCAM Table Info\n");
3702 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3703 
3704 	return 0;
3705 }
3706 
3707 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3708 
3709 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3710 {
3711 	struct npc_exact_table *table;
3712 	struct rvu *rvu = s->private;
3713 	struct npc_key_field *field;
3714 	u64 cfg, cam1, off;
3715 	u16 chan, pcifunc;
3716 	int blkaddr, i;
3717 	char *str;
3718 
3719 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3720 	table = rvu->hw->table;
3721 
3722 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3723 
3724 	seq_puts(s, "\n\t Exact Hit on drop status\n");
3725 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3726 
3727 	for (i = 0; i < table->num_drop_rules; i++) {
3728 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3729 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3730 
3731 		/* channel will be always in keyword 0 */
3732 		cam1 = rvu_read64(rvu, blkaddr,
3733 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3734 		chan = field->kw_mask[0] & cam1;
3735 
3736 		str = (cfg & 1) ? "enabled" : "disabled";
3737 		if (is_cn20k(rvu->pdev)) {
3738 			off = NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(i, 0);
3739 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc,
3740 			   i, rvu_read64(rvu, blkaddr, off), chan, str);
3741 		} else {
3742 			off = NPC_AF_MATCH_STATX(table->counter_idx[i]);
3743 			seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc,
3744 				   i, rvu_read64(rvu, blkaddr, off),
3745 				   chan, str);
3746 		}
3747 
3748 	}
3749 
3750 	return 0;
3751 }
3752 
3753 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3754 
3755 static void rvu_dbg_npc_init(struct rvu *rvu)
3756 {
3757 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3758 
3759 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3760 			    &rvu_dbg_npc_mcam_info_fops);
3761 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3762 			    &rvu_dbg_npc_mcam_rules_fops);
3763 
3764 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3765 			    &rvu_dbg_npc_rx_miss_act_fops);
3766 
3767 	if (is_cn20k(rvu->pdev))
3768 		npc_cn20k_debugfs_init(rvu);
3769 
3770 	if (!rvu->hw->cap.npc_exact_match_enabled)
3771 		return;
3772 
3773 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3774 			    &rvu_dbg_npc_exact_entries_fops);
3775 
3776 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3777 			    &rvu_dbg_npc_exact_info_fops);
3778 
3779 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3780 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3781 
3782 }
3783 
3784 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3785 {
3786 	struct cpt_ctx *ctx = filp->private;
3787 	u64 busy_sts = 0, free_sts = 0;
3788 	u32 e_min = 0, e_max = 0, e, i;
3789 	u16 max_ses, max_ies, max_aes;
3790 	struct rvu *rvu = ctx->rvu;
3791 	int blkaddr = ctx->blkaddr;
3792 	u64 reg;
3793 
3794 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3795 	max_ses = reg & 0xffff;
3796 	max_ies = (reg >> 16) & 0xffff;
3797 	max_aes = (reg >> 32) & 0xffff;
3798 
3799 	switch (eng_type) {
3800 	case CPT_AE_TYPE:
3801 		e_min = max_ses + max_ies;
3802 		e_max = max_ses + max_ies + max_aes;
3803 		break;
3804 	case CPT_SE_TYPE:
3805 		e_min = 0;
3806 		e_max = max_ses;
3807 		break;
3808 	case CPT_IE_TYPE:
3809 		e_min = max_ses;
3810 		e_max = max_ses + max_ies;
3811 		break;
3812 	default:
3813 		return -EINVAL;
3814 	}
3815 
3816 	for (e = e_min, i = 0; e < e_max; e++, i++) {
3817 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3818 		if (reg & 0x1)
3819 			busy_sts |= 1ULL << i;
3820 
3821 		if (reg & 0x2)
3822 			free_sts |= 1ULL << i;
3823 	}
3824 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3825 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3826 
3827 	return 0;
3828 }
3829 
/* debugfs "cpt_ae_sts": busy/free bitmaps for asymmetric (AE) engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}
3834 
3835 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3836 
/* debugfs "cpt_se_sts": busy/free bitmaps for symmetric (SE) engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}
3841 
3842 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3843 
/* debugfs "cpt_ie_sts": busy/free bitmaps for IPsec (IE) engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}
3848 
3849 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3850 
3851 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3852 {
3853 	struct cpt_ctx *ctx = filp->private;
3854 	u16 max_ses, max_ies, max_aes;
3855 	struct rvu *rvu = ctx->rvu;
3856 	int blkaddr = ctx->blkaddr;
3857 	u32 e_max, e;
3858 	u64 reg;
3859 
3860 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3861 	max_ses = reg & 0xffff;
3862 	max_ies = (reg >> 16) & 0xffff;
3863 	max_aes = (reg >> 32) & 0xffff;
3864 
3865 	e_max = max_ses + max_ies + max_aes;
3866 
3867 	seq_puts(filp, "===========================================\n");
3868 	for (e = 0; e < e_max; e++) {
3869 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3870 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3871 			   reg & 0xff);
3872 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3873 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3874 			   reg);
3875 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3876 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3877 			   reg);
3878 		seq_puts(filp, "===========================================\n");
3879 	}
3880 	return 0;
3881 }
3882 
3883 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3884 
3885 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3886 {
3887 	struct cpt_ctx *ctx = filp->private;
3888 	int blkaddr = ctx->blkaddr;
3889 	struct rvu *rvu = ctx->rvu;
3890 	struct rvu_block *block;
3891 	struct rvu_hwinfo *hw;
3892 	u64 reg;
3893 	u32 lf;
3894 
3895 	hw = rvu->hw;
3896 	block = &hw->block[blkaddr];
3897 	if (!block->lf.bmap)
3898 		return -ENODEV;
3899 
3900 	seq_puts(filp, "===========================================\n");
3901 	for (lf = 0; lf < block->lf.max; lf++) {
3902 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3903 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3904 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3905 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3906 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3907 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3908 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3909 				(lf << block->lfshift));
3910 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3911 		seq_puts(filp, "===========================================\n");
3912 	}
3913 	return 0;
3914 }
3915 
3916 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3917 
/* debugfs "cpt_err_info": dump the CPT fault/error interrupt and
 * diagnostic registers (FLT, PSN, RVU/RAS interrupts, exec error info).
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	/* Two FLT/PSN_EXE instances exist; only one PSN_LF instance */
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}
3942 
3943 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3944 
3945 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3946 {
3947 	struct cpt_ctx *ctx = filp->private;
3948 	struct rvu *rvu = ctx->rvu;
3949 	int blkaddr = ctx->blkaddr;
3950 	u64 reg;
3951 
3952 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3953 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
3954 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3955 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
3956 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3957 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
3958 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3959 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
3960 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3961 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
3962 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3963 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
3964 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3965 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
3966 
3967 	return 0;
3968 }
3969 
3970 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3971 
3972 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3973 {
3974 	struct cpt_ctx *ctx;
3975 
3976 	if (!is_block_implemented(rvu->hw, blkaddr))
3977 		return;
3978 
3979 	if (blkaddr == BLKADDR_CPT0) {
3980 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3981 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3982 		ctx->blkaddr = BLKADDR_CPT0;
3983 		ctx->rvu = rvu;
3984 	} else {
3985 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3986 						      rvu->rvu_dbg.root);
3987 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3988 		ctx->blkaddr = BLKADDR_CPT1;
3989 		ctx->rvu = rvu;
3990 	}
3991 
3992 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3993 			    &rvu_dbg_cpt_pc_fops);
3994 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3995 			    &rvu_dbg_cpt_ae_sts_fops);
3996 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3997 			    &rvu_dbg_cpt_se_sts_fops);
3998 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3999 			    &rvu_dbg_cpt_ie_sts_fops);
4000 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
4001 			    &rvu_dbg_cpt_engines_info_fops);
4002 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
4003 			    &rvu_dbg_cpt_lfs_info_fops);
4004 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
4005 			    &rvu_dbg_cpt_err_info_fops);
4006 }
4007 
4008 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
4009 {
4010 	if (is_cn20k(rvu->pdev))
4011 		return "cn20k";
4012 
4013 	if (!is_rvu_otx2(rvu))
4014 		return "cn10k";
4015 	else
4016 		return "octeontx2";
4017 }
4018 
4019 void rvu_dbg_init(struct rvu *rvu)
4020 {
4021 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
4022 
4023 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
4024 			    &rvu_dbg_rsrc_status_fops);
4025 
4026 	if (!is_rvu_otx2(rvu))
4027 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
4028 				    rvu, &rvu_dbg_lmtst_map_table_fops);
4029 
4030 	debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
4031 			    &rvu_dbg_rvu_fwdata_fops);
4032 
4033 	if (!cgx_get_cgxcnt_max())
4034 		goto create;
4035 
4036 	if (is_rvu_otx2(rvu))
4037 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
4038 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
4039 	else
4040 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
4041 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
4042 
4043 create:
4044 	rvu_dbg_npa_init(rvu);
4045 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
4046 
4047 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
4048 	rvu_dbg_cgx_init(rvu);
4049 	rvu_dbg_npc_init(rvu);
4050 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
4051 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
4052 	rvu_dbg_mcs_init(rvu);
4053 }
4054 
/* Tear down the whole debugfs tree created in rvu_dbg_init() */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
4059 
4060 #endif /* CONFIG_DEBUG_FS */
4061