1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/debugfs.h>
5 #include <linux/device.h>
6 #include <linux/seq_file.h>
7 #include <linux/string_choices.h>
8 
9 #include "hnae3.h"
10 #include "hns3_debugfs.h"
11 #include "hns3_enet.h"
12 
13 static struct dentry *hns3_dbgfs_root;
14 
15 static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
16 	{
17 		.name = "tm"
18 	},
19 	{
20 		.name = "tx_bd_info"
21 	},
22 	{
23 		.name = "rx_bd_info"
24 	},
25 	{
26 		.name = "mac_list"
27 	},
28 	{
29 		.name = "reg"
30 	},
31 	{
32 		.name = "queue"
33 	},
34 	{
35 		.name = "fd"
36 	},
37 	/* keep common at the bottom and add new directory above */
38 	{
39 		.name = "common"
40 	},
41 };
42 
43 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
44 static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
45 static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
46 
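/* Table of debugfs files: each entry maps a file name to its HNAE3 debugfs
 * command, the parent directory from hns3_dbg_dentry, and the init routine
 * that creates the file.
 */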
47 static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
48 	{
49 		.name = "tm_nodes",
50 		.cmd = HNAE3_DBG_CMD_TM_NODES,
51 		.dentry = HNS3_DBG_DENTRY_TM,
52 		.init = hns3_dbg_common_init_t2,
53 	},
54 	{
55 		.name = "tm_priority",
56 		.cmd = HNAE3_DBG_CMD_TM_PRI,
57 		.dentry = HNS3_DBG_DENTRY_TM,
58 		.init = hns3_dbg_common_init_t2,
59 	},
60 	{
61 		.name = "tm_qset",
62 		.cmd = HNAE3_DBG_CMD_TM_QSET,
63 		.dentry = HNS3_DBG_DENTRY_TM,
64 		.init = hns3_dbg_common_init_t2,
65 	},
66 	{
67 		.name = "tm_map",
68 		.cmd = HNAE3_DBG_CMD_TM_MAP,
69 		.dentry = HNS3_DBG_DENTRY_TM,
70 		.init = hns3_dbg_common_init_t2,
71 	},
72 	{
73 		.name = "tm_pg",
74 		.cmd = HNAE3_DBG_CMD_TM_PG,
75 		.dentry = HNS3_DBG_DENTRY_TM,
76 		.init = hns3_dbg_common_init_t2,
77 	},
78 	{
79 		.name = "tm_port",
80 		.cmd = HNAE3_DBG_CMD_TM_PORT,
81 		.dentry = HNS3_DBG_DENTRY_TM,
82 		.init = hns3_dbg_common_init_t2,
83 	},
84 	{
85 		.name = "tc_sch_info",
86 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
87 		.dentry = HNS3_DBG_DENTRY_TM,
88 		.init = hns3_dbg_common_init_t2,
89 	},
90 	{
91 		.name = "qos_pause_cfg",
92 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
93 		.dentry = HNS3_DBG_DENTRY_TM,
94 		.init = hns3_dbg_common_init_t2,
95 	},
96 	{
97 		.name = "qos_pri_map",
98 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
99 		.dentry = HNS3_DBG_DENTRY_TM,
100 		.init = hns3_dbg_common_init_t2,
101 	},
102 	{
103 		.name = "qos_dscp_map",
104 		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
105 		.dentry = HNS3_DBG_DENTRY_TM,
106 		.init = hns3_dbg_common_init_t2,
107 	},
108 	{
109 		.name = "qos_buf_cfg",
110 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
111 		.dentry = HNS3_DBG_DENTRY_TM,
112 		.init = hns3_dbg_common_init_t2,
113 	},
114 	{
115 		.name = "dev_info",
116 		.cmd = HNAE3_DBG_CMD_DEV_INFO,
117 		.dentry = HNS3_DBG_DENTRY_COMMON,
118 		.init = hns3_dbg_common_init_t1,
119 	},
120 	{
121 		.name = "tx_bd_queue",
122 		.cmd = HNAE3_DBG_CMD_TX_BD,
123 		.dentry = HNS3_DBG_DENTRY_TX_BD,
124 		.init = hns3_dbg_bd_file_init,
125 	},
126 	{
127 		.name = "rx_bd_queue",
128 		.cmd = HNAE3_DBG_CMD_RX_BD,
129 		.dentry = HNS3_DBG_DENTRY_RX_BD,
130 		.init = hns3_dbg_bd_file_init,
131 	},
132 	{
133 		.name = "uc",
134 		.cmd = HNAE3_DBG_CMD_MAC_UC,
135 		.dentry = HNS3_DBG_DENTRY_MAC,
136 		.init = hns3_dbg_common_init_t2,
137 	},
138 	{
139 		.name = "mc",
140 		.cmd = HNAE3_DBG_CMD_MAC_MC,
141 		.dentry = HNS3_DBG_DENTRY_MAC,
142 		.init = hns3_dbg_common_init_t2,
143 	},
144 	{
145 		.name = "mng_tbl",
146 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
147 		.dentry = HNS3_DBG_DENTRY_COMMON,
148 		.init = hns3_dbg_common_init_t2,
149 	},
150 	{
151 		.name = "loopback",
152 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
153 		.dentry = HNS3_DBG_DENTRY_COMMON,
154 		.init = hns3_dbg_common_init_t2,
155 	},
156 	{
157 		.name = "interrupt_info",
158 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
159 		.dentry = HNS3_DBG_DENTRY_COMMON,
160 		.init = hns3_dbg_common_init_t2,
161 	},
162 	{
163 		.name = "reset_info",
164 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
165 		.dentry = HNS3_DBG_DENTRY_COMMON,
166 		.init = hns3_dbg_common_init_t2,
167 	},
168 	{
169 		.name = "imp_info",
170 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
171 		.dentry = HNS3_DBG_DENTRY_COMMON,
172 		.init = hns3_dbg_common_init_t2,
173 	},
174 	{
175 		.name = "ncl_config",
176 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
177 		.dentry = HNS3_DBG_DENTRY_COMMON,
178 		.init = hns3_dbg_common_init_t2,
179 	},
180 	{
181 		.name = "mac_tnl_status",
182 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
183 		.dentry = HNS3_DBG_DENTRY_COMMON,
184 		.init = hns3_dbg_common_init_t2,
185 	},
186 	{
187 		.name = "bios_common",
188 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
189 		.dentry = HNS3_DBG_DENTRY_REG,
190 		.init = hns3_dbg_common_init_t2,
191 	},
192 	{
193 		.name = "ssu",
194 		.cmd = HNAE3_DBG_CMD_REG_SSU,
195 		.dentry = HNS3_DBG_DENTRY_REG,
196 		.init = hns3_dbg_common_init_t2,
197 	},
198 	{
199 		.name = "igu_egu",
200 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
201 		.dentry = HNS3_DBG_DENTRY_REG,
202 		.init = hns3_dbg_common_init_t2,
203 	},
204 	{
205 		.name = "rpu",
206 		.cmd = HNAE3_DBG_CMD_REG_RPU,
207 		.dentry = HNS3_DBG_DENTRY_REG,
208 		.init = hns3_dbg_common_init_t2,
209 	},
210 	{
211 		.name = "ncsi",
212 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
213 		.dentry = HNS3_DBG_DENTRY_REG,
214 		.init = hns3_dbg_common_init_t2,
215 	},
216 	{
217 		.name = "rtc",
218 		.cmd = HNAE3_DBG_CMD_REG_RTC,
219 		.dentry = HNS3_DBG_DENTRY_REG,
220 		.init = hns3_dbg_common_init_t2,
221 	},
222 	{
223 		.name = "ppp",
224 		.cmd = HNAE3_DBG_CMD_REG_PPP,
225 		.dentry = HNS3_DBG_DENTRY_REG,
226 		.init = hns3_dbg_common_init_t2,
227 	},
228 	{
229 		.name = "rcb",
230 		.cmd = HNAE3_DBG_CMD_REG_RCB,
231 		.dentry = HNS3_DBG_DENTRY_REG,
232 		.init = hns3_dbg_common_init_t2,
233 	},
234 	{
235 		.name = "tqp",
236 		.cmd = HNAE3_DBG_CMD_REG_TQP,
237 		.dentry = HNS3_DBG_DENTRY_REG,
238 		.init = hns3_dbg_common_init_t2,
239 	},
240 	{
241 		.name = "mac",
242 		.cmd = HNAE3_DBG_CMD_REG_MAC,
243 		.dentry = HNS3_DBG_DENTRY_REG,
244 		.init = hns3_dbg_common_init_t2,
245 	},
246 	{
247 		.name = "dcb",
248 		.cmd = HNAE3_DBG_CMD_REG_DCB,
249 		.dentry = HNS3_DBG_DENTRY_REG,
250 		.init = hns3_dbg_common_init_t2,
251 	},
252 	{
253 		.name = "queue_map",
254 		.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
255 		.dentry = HNS3_DBG_DENTRY_QUEUE,
256 		.init = hns3_dbg_common_init_t1,
257 	},
258 	{
259 		.name = "rx_queue_info",
260 		.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
261 		.dentry = HNS3_DBG_DENTRY_QUEUE,
262 		.init = hns3_dbg_common_init_t1,
263 	},
264 	{
265 		.name = "tx_queue_info",
266 		.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
267 		.dentry = HNS3_DBG_DENTRY_QUEUE,
268 		.init = hns3_dbg_common_init_t1,
269 	},
270 	{
271 		.name = "fd_tcam",
272 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
273 		.dentry = HNS3_DBG_DENTRY_FD,
274 		.init = hns3_dbg_common_init_t2,
275 	},
276 	{
277 		.name = "service_task_info",
278 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
279 		.dentry = HNS3_DBG_DENTRY_COMMON,
280 		.init = hns3_dbg_common_init_t2,
281 	},
282 	{
283 		.name = "vlan_config",
284 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
285 		.dentry = HNS3_DBG_DENTRY_COMMON,
286 		.init = hns3_dbg_common_init_t2,
287 	},
288 	{
289 		.name = "ptp_info",
290 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
291 		.dentry = HNS3_DBG_DENTRY_COMMON,
292 		.init = hns3_dbg_common_init_t2,
293 	},
294 	{
295 		.name = "fd_counter",
296 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
297 		.dentry = HNS3_DBG_DENTRY_FD,
298 		.init = hns3_dbg_common_init_t2,
299 	},
300 	{
301 		.name = "umv_info",
302 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
303 		.dentry = HNS3_DBG_DENTRY_COMMON,
304 		.init = hns3_dbg_common_init_t2,
305 	},
306 	{
307 		.name = "page_pool_info",
308 		.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
309 		.dentry = HNS3_DBG_DENTRY_COMMON,
310 		.init = hns3_dbg_common_init_t1,
311 	},
312 	{
313 		.name = "coalesce_info",
314 		.cmd = HNAE3_DBG_CMD_COAL_INFO,
315 		.dentry = HNS3_DBG_DENTRY_COMMON,
316 		.init = hns3_dbg_common_init_t1,
317 	},
318 };
319 
320 static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
321 	{
322 		.name = "support FD",
323 		.cap_bit = HNAE3_DEV_SUPPORT_FD_B,
324 	}, {
325 		.name = "support GRO",
326 		.cap_bit = HNAE3_DEV_SUPPORT_GRO_B,
327 	}, {
328 		.name = "support FEC",
329 		.cap_bit = HNAE3_DEV_SUPPORT_FEC_B,
330 	}, {
331 		.name = "support UDP GSO",
332 		.cap_bit = HNAE3_DEV_SUPPORT_UDP_GSO_B,
333 	}, {
334 		.name = "support PTP",
335 		.cap_bit = HNAE3_DEV_SUPPORT_PTP_B,
336 	}, {
337 		.name = "support INT QL",
338 		.cap_bit = HNAE3_DEV_SUPPORT_INT_QL_B,
339 	}, {
340 		.name = "support HW TX csum",
341 		.cap_bit = HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
342 	}, {
343 		.name = "support UDP tunnel csum",
344 		.cap_bit = HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
345 	}, {
346 		.name = "support TX push",
347 		.cap_bit = HNAE3_DEV_SUPPORT_TX_PUSH_B,
348 	}, {
349 		.name = "support imp-controlled PHY",
350 		.cap_bit = HNAE3_DEV_SUPPORT_PHY_IMP_B,
351 	}, {
352 		.name = "support imp-controlled RAS",
353 		.cap_bit = HNAE3_DEV_SUPPORT_RAS_IMP_B,
354 	}, {
355 		.name = "support rxd advanced layout",
356 		.cap_bit = HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
357 	}, {
358 		.name = "support port vlan bypass",
359 		.cap_bit = HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
360 	}, {
361 		.name = "support modify vlan filter state",
362 		.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
363 	}, {
364 		.name = "support FEC statistics",
365 		.cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
366 	}, {
367 		.name = "support lane num",
368 		.cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
369 	}, {
370 		.name = "support wake on lan",
371 		.cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
372 	}, {
373 		.name = "support tm flush",
374 		.cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
375 	}, {
376 		.name = "support vf fault detect",
377 		.cap_bit = HNAE3_DEV_SUPPORT_VF_FAULT_B,
378 	}
379 };
380 
381 static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
382 static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
383 static const char * const
384 dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
385 
386 static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
387 			       struct seq_file *s, int i, bool is_tx)
388 {
389 	unsigned int gl_offset, ql_offset;
390 	struct hns3_enet_coalesce *coal;
391 	unsigned int reg_val;
392 	struct dim *dim;
393 	bool ql_enable;
394 
395 	if (is_tx) {
396 		coal = &tqp_vector->tx_group.coal;
397 		dim = &tqp_vector->tx_group.dim;
398 		gl_offset = HNS3_VECTOR_GL1_OFFSET;
399 		ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
400 		ql_enable = tqp_vector->tx_group.coal.ql_enable;
401 	} else {
402 		coal = &tqp_vector->rx_group.coal;
403 		dim = &tqp_vector->rx_group.dim;
404 		gl_offset = HNS3_VECTOR_GL0_OFFSET;
405 		ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
406 		ql_enable = tqp_vector->rx_group.coal.ql_enable;
407 	}
408 
409 	seq_printf(s, "%-8d", i);
410 	seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
411 		   dim_state_str[dim->state] : "unknown");
412 	seq_printf(s, "%-12u", dim->profile_ix);
413 	seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
414 		   dim_cqe_mode_str[dim->mode] : "unknown");
415 	seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
416 		   dim_tune_stat_str[dim->tune_state] : "unknown");
417 	seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
418 		   dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
419 	reg_val = readl(tqp_vector->mask_addr + gl_offset) &
420 		  HNS3_VECTOR_GL_MASK;
421 	seq_printf(s, "%-7u", reg_val);
422 	if (ql_enable) {
423 		reg_val = readl(tqp_vector->mask_addr + ql_offset) &
424 			  HNS3_VECTOR_QL_MASK;
425 		seq_printf(s, "%u\n", reg_val);
426 	} else {
427 		seq_puts(s, "NA\n");
428 	}
429 }
430 
431 static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
432 {
433 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
434 	struct hns3_enet_tqp_vector *tqp_vector;
435 	struct hns3_nic_priv *priv = h->priv;
436 	unsigned int i;
437 
438 	seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
439 
440 	seq_puts(s, "VEC_ID  ALGO_STATE  PROFILE_ID  CQE_MODE  TUNE_STATE  ");
441 	seq_puts(s, "STEPS_LEFT  STEPS_RIGHT  TIRED  SW_GL  SW_QL  ");
442 	seq_puts(s, "HW_GL  HW_QL\n");
443 
444 	for (i = 0; i < priv->vector_num; i++) {
445 		tqp_vector = &priv->tqp_vector[i];
446 		hns3_get_coal_info(tqp_vector, s, i, is_tx);
447 	}
448 }
449 
450 static int hns3_dbg_coal_info(struct seq_file *s, void *data)
451 {
452 	hns3_dump_coal_info(s, true);
453 	seq_puts(s, "\n");
454 	hns3_dump_coal_info(s, false);
455 
456 	return 0;
457 }
458 
459 static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
460 				    struct seq_file *s, u32 index)
461 {
462 	struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
463 	void __iomem *base = ring->tqp->io_base;
464 	u32 base_add_l, base_add_h;
465 
466 	seq_printf(s, "%-10u", index);
467 	seq_printf(s, "%-8u",
468 		   readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
469 	seq_printf(s, "%-8u",
470 		   readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
471 	seq_printf(s, "%-6u",
472 		   readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
473 	seq_printf(s, "%-6u",
474 		   readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
475 	seq_printf(s, "%-8u",
476 		   readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
477 	seq_printf(s, "%-11u", readl_relaxed(base +
478 		   HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
479 	seq_printf(s, "%-11u", ring->rx_copybreak);
480 	seq_printf(s, "%-9s",
481 		   str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
482 
483 	if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
484 		seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
485 						  HNS3_RING_RX_EN_REG)));
486 	else
487 		seq_printf(s, "%-12s", "NA");
488 
489 	base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
490 	base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
491 	seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
492 }
493 
494 static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
495 {
496 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
497 	struct hns3_nic_priv *priv = h->priv;
498 	struct hns3_enet_ring *ring;
499 	u32 i;
500 
501 	if (!priv->ring) {
502 		dev_err(&h->pdev->dev, "priv->ring is NULL\n");
503 		return -EFAULT;
504 	}
505 
506 	seq_puts(s, "QUEUE_ID  BD_NUM  BD_LEN  TAIL  HEAD  FBDNUM  ");
507 	seq_puts(s, "PKTNUM     COPYBREAK  RING_EN  RX_RING_EN  BASE_ADDR\n");
508 
509 	for (i = 0; i < h->kinfo.num_tqps; i++) {
510 		/* Check on each iteration whether the instance is being
511 		 * reset, to avoid referencing invalid memory. The code below
512 		 * must also complete within 100ms.
513 		 */
514 		if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
515 		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
516 			return -EPERM;
517 
518 		ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
519 		hns3_dump_rx_queue_info(ring, s, i);
520 	}
521 
522 	return 0;
523 }
524 
525 static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
526 				    struct seq_file *s, u32 index)
527 {
528 	struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
529 	void __iomem *base = ring->tqp->io_base;
530 	u32 base_add_l, base_add_h;
531 
532 	seq_printf(s, "%-10u", index);
533 	seq_printf(s, "%-8u",
534 		   readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
535 	seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
536 	seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
537 	seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
538 	seq_printf(s, "%-8u",
539 		   readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
540 	seq_printf(s, "%-8u",
541 		   readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
542 	seq_printf(s, "%-11u",
543 		   readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
544 	seq_printf(s, "%-9s",
545 		   str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
546 
547 	if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
548 		seq_printf(s, "%-12s",
549 			   str_on_off(readl_relaxed(base +
550 						    HNS3_RING_TX_EN_REG)));
551 	else
552 		seq_printf(s, "%-12s", "NA");
553 
554 	base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
555 	base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
556 	seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
557 }
558 
559 static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
560 {
561 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
562 	struct hns3_nic_priv *priv = h->priv;
563 	struct hns3_enet_ring *ring;
564 	u32 i;
565 
566 	if (!priv->ring) {
567 		dev_err(&h->pdev->dev, "priv->ring is NULL\n");
568 		return -EFAULT;
569 	}
570 
571 	seq_puts(s, "QUEUE_ID  BD_NUM  TC  TAIL  HEAD  FBDNUM  OFFSET  ");
572 	seq_puts(s, "PKTNUM     RING_EN  TX_RING_EN  BASE_ADDR\n");
573 
574 	for (i = 0; i < h->kinfo.num_tqps; i++) {
575 		/* Check on each iteration whether the instance is being
576 		 * reset, to avoid referencing invalid memory. The code below
577 		 * must also complete within 100ms.
578 		 */
579 		if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
580 		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
581 			return -EPERM;
582 
583 		ring = &priv->ring[i];
584 		hns3_dump_tx_queue_info(ring, s, i);
585 	}
586 
587 	return 0;
588 }
589 
590 static int hns3_dbg_queue_map(struct seq_file *s, void *data)
591 {
592 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
593 	struct hns3_nic_priv *priv = h->priv;
594 	u32 i;
595 
596 	if (!h->ae_algo->ops->get_global_queue_id)
597 		return -EOPNOTSUPP;
598 
599 	seq_puts(s, "local_queue_id  global_queue_id  vector_id\n");
600 
601 	for (i = 0; i < h->kinfo.num_tqps; i++) {
602 		if (!priv->ring || !priv->ring[i].tqp_vector)
603 			continue;
604 		seq_printf(s, "%-16u%-17u%d\n", i,
605 			   h->ae_algo->ops->get_global_queue_id(h, i),
606 			   priv->ring[i].tqp_vector->vector_irq);
607 	}
608 
609 	return 0;
610 }
611 
612 static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
613 				 struct hns3_desc *desc, struct seq_file *s,
614 				 int idx)
615 {
616 	seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
617 		   idx, le32_to_cpu(desc->rx.l234_info),
618 		   le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
619 		   le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
620 		   le16_to_cpu(desc->rx.vlan_tag),
621 		   le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
622 		   le16_to_cpu(desc->rx.ot_vlan_tag),
623 		   le32_to_cpu(desc->rx.bd_base_info));
624 
625 	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
626 		u32 ol_info = le32_to_cpu(desc->rx.ol_info);
627 
628 		seq_printf(s, "%-7lu%-9u\n",
629 			   hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
630 					   HNS3_RXD_PTYPE_S),
631 			   le16_to_cpu(desc->csum));
632 	} else {
633 		seq_puts(s, "NA     NA\n");
634 	}
635 }
636 
637 static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
638 {
639 	struct hns3_dbg_data *data = s->private;
640 	struct hnae3_handle *h = data->handle;
641 	struct hns3_nic_priv *priv = h->priv;
642 	struct hns3_enet_ring *ring;
643 	struct hns3_desc *desc;
644 	unsigned int i;
645 
646 	if (data->qid >= h->kinfo.num_tqps) {
647 		dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
648 		return -EINVAL;
649 	}
650 
651 	seq_printf(s, "Queue %u rx bd info:\n", data->qid);
652 	seq_puts(s, "BD_IDX   L234_INFO  PKT_LEN   SIZE    ");
653 	seq_puts(s, "RSS_HASH    FD_ID  VLAN_TAG  O_DM_VLAN_ID_FB  ");
654 	seq_puts(s, "OT_VLAN_TAG  BD_BASE_INFO  PTYPE  HW_CSUM\n");
655 
656 	ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
657 	for (i = 0; i < ring->desc_num; i++) {
658 		desc = &ring->desc[i];
659 
660 		hns3_dump_rx_bd_info(priv, desc, s, i);
661 	}
662 
663 	return 0;
664 }
665 
666 static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
667 				 int idx)
668 {
669 	seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
670 		   idx, le64_to_cpu(desc->addr),
671 		   le16_to_cpu(desc->tx.vlan_tag),
672 		   le16_to_cpu(desc->tx.send_size),
673 		   le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
674 		   le16_to_cpu(desc->tx.outer_vlan_tag),
675 		   le16_to_cpu(desc->tx.tv),
676 		   le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
677 		   le32_to_cpu(desc->tx.paylen_ol4cs),
678 		   le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
679 		   le16_to_cpu(desc->tx.mss_hw_csum));
680 }
681 
682 static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
683 {
684 	struct hns3_dbg_data *data = s->private;
685 	struct hnae3_handle *h = data->handle;
686 	struct hns3_nic_priv *priv = h->priv;
687 	struct hns3_enet_ring *ring;
688 	struct hns3_desc *desc;
689 	unsigned int i;
690 
691 	if (data->qid >= h->kinfo.num_tqps) {
692 		dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
693 		return -EINVAL;
694 	}
695 
696 	seq_printf(s, "Queue %u tx bd info:\n", data->qid);
697 	seq_puts(s, "BD_IDX  ADDRESS             VLAN_TAG  SIZE  ");
698 	seq_puts(s, "T_CS_VLAN_TSO  OT_VLAN_TAG   TV     OLT_VLAN_LEN  ");
699 	seq_puts(s, "PAYLEN_OL4CS  BD_FE_SC_VLD   MSS_HW_CSUM\n");
700 
701 	ring = &priv->ring[data->qid];
702 	for (i = 0; i < ring->desc_num; i++) {
703 		desc = &ring->desc[i];
704 
705 		hns3_dump_tx_bd_info(desc, s, i);
706 	}
707 
708 	return 0;
709 }
710 
711 static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
712 {
713 	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
714 	unsigned long *caps = ae_dev->caps;
715 	u32 i, state;
716 
717 	seq_puts(s, "dev capability:\n");
718 
719 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
720 		state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
721 		seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
722 			   str_yes_no(state));
723 	}
724 
725 	seq_puts(s, "\n");
726 }
727 
728 static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
729 {
730 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
731 	struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
732 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
733 	struct net_device *dev = kinfo->netdev;
734 
735 	seq_puts(s, "dev_spec:\n");
736 	seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
737 	seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
738 	seq_printf(s, "MAX non tso bd num: %u\n",
739 		   dev_specs->max_non_tso_bd_num);
740 	seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
741 	seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
742 	seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
743 	seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
744 	seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
745 	seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
746 	seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
747 	seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
748 	seq_printf(s, "Total number of enabled TCs: %u\n",
749 		   kinfo->tc_info.num_tc);
750 	seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
751 	seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
752 	seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
753 	seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
754 	seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
755 	seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
756 	seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
757 	seq_printf(s, "TX timeout threshold: %d seconds\n",
758 		   dev->watchdog_timeo / HZ);
759 	seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
760 	seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
761 }
762 
763 static int hns3_dbg_dev_info(struct seq_file *s, void *data)
764 {
765 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
766 
767 	hns3_dbg_dev_caps(h, s);
768 	hns3_dbg_dev_specs(h, s);
769 
770 	return 0;
771 }
772 
773 static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
774 				     struct seq_file *s, u32 index)
775 {
776 	seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
777 		   index,
778 		   READ_ONCE(ring->page_pool->pages_state_hold_cnt),
779 		   atomic_read(&ring->page_pool->pages_state_release_cnt),
780 		   ring->page_pool->p.pool_size,
781 		   ring->page_pool->p.order,
782 		   ring->page_pool->p.nid,
783 		   ring->page_pool->p.max_len / 1024);
784 }
785 
786 static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
787 {
788 	struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
789 	struct hns3_nic_priv *priv = h->priv;
790 	struct hns3_enet_ring *ring;
791 	u32 i;
792 
793 	if (!priv->ring) {
794 		dev_err(&h->pdev->dev, "priv->ring is NULL\n");
795 		return -EFAULT;
796 	}
797 
798 	if (!priv->ring[h->kinfo.num_tqps].page_pool) {
799 		dev_err(&h->pdev->dev, "page pool is not initialized\n");
800 		return -EFAULT;
801 	}
802 
803 	seq_puts(s, "QUEUE_ID  ALLOCATE_CNT  FREE_CNT      ");
804 	seq_puts(s, "POOL_SIZE(PAGE_NUM)  ORDER  NUMA_ID  MAX_LEN\n");
805 
806 	for (i = 0; i < h->kinfo.num_tqps; i++) {
807 		if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
808 		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
809 			return -EPERM;
810 
811 		ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
812 		hns3_dump_page_pool_info(ring, s, i);
813 	}
814 
815 	return 0;
816 }
817 
818 static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
819 {
820 	struct hns3_dbg_data *data = s->private;
821 	struct hnae3_handle *h = data->handle;
822 	struct hns3_nic_priv *priv = h->priv;
823 
824 	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
825 	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
826 		return -EBUSY;
827 
828 	if (data->cmd == HNAE3_DBG_CMD_TX_BD)
829 		return hns3_dbg_tx_bd_info(s, private);
830 	else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
831 		return hns3_dbg_rx_bd_info(s, private);
832 
833 	return -EOPNOTSUPP;
834 }
835 DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
836 
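/* Create one "tx_bd_queue<N>"/"rx_bd_queue<N>" file per queue; each file's
 * private data records the handle, command and queue id used by the show
 * callback above.
 */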
837 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
838 {
839 	struct hns3_dbg_data *data;
840 	struct dentry *entry_dir;
841 	u16 max_queue_num;
842 	unsigned int i;
843 
844 	entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
845 	max_queue_num = hns3_get_max_available_channels(handle);
846 	data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
847 			    GFP_KERNEL);
848 	if (!data)
849 		return -ENOMEM;
850 
851 	for (i = 0; i < max_queue_num; i++) {
852 		char name[HNS3_DBG_FILE_NAME_LEN];
853 
854 		data[i].handle = handle;
855 		data[i].cmd = hns3_dbg_cmd[cmd].cmd;
856 		data[i].qid = i;
857 		sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
858 		debugfs_create_file(name, 0400, entry_dir, &data[i],
859 				    &hns3_dbg_bd_info_fops);
860 	}
861 
862 	return 0;
863 }
864 
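/* "t1" files are backed by read functions implemented in this file and are
 * registered as devm-managed seq_files.
 */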
865 static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
866 {
867 	struct device *dev = &handle->pdev->dev;
868 	struct dentry *entry_dir;
869 	read_func func = NULL;
870 
871 	switch (hns3_dbg_cmd[cmd].cmd) {
872 	case HNAE3_DBG_CMD_TX_QUEUE_INFO:
873 		func = hns3_dbg_tx_queue_info;
874 		break;
875 	case HNAE3_DBG_CMD_RX_QUEUE_INFO:
876 		func = hns3_dbg_rx_queue_info;
877 		break;
878 	case HNAE3_DBG_CMD_QUEUE_MAP:
879 		func = hns3_dbg_queue_map;
880 		break;
881 	case HNAE3_DBG_CMD_PAGE_POOL_INFO:
882 		func = hns3_dbg_page_pool_info;
883 		break;
884 	case HNAE3_DBG_CMD_COAL_INFO:
885 		func = hns3_dbg_coal_info;
886 		break;
887 	case HNAE3_DBG_CMD_DEV_INFO:
888 		func = hns3_dbg_dev_info;
889 		break;
890 	default:
891 		return -EINVAL;
892 	}
893 
894 	entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
895 	debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
896 				    func);
897 
898 	return 0;
899 }
900 
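/* "t2" files obtain their read function from the AE layer via
 * ops->dbg_get_read_func(), so the dump itself is implemented by the
 * underlying AE driver.
 */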
901 static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
902 {
903 	const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
904 	struct device *dev = &handle->pdev->dev;
905 	struct dentry *entry_dir;
906 	read_func func;
907 	int ret;
908 
909 	if (!ops->dbg_get_read_func)
910 		return 0;
911 
912 	ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
913 	if (ret)
914 		return ret;
915 
916 	entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
917 	debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
918 				    func);
919 
920 	return 0;
921 }
922 
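/* Create the per-device debugfs directory tree and all command files.
 * Files whose feature the device does not support (tm_nodes on pre-V3
 * hardware, ptp_info without the PTP capability) are skipped.
 */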
923 int hns3_dbg_init(struct hnae3_handle *handle)
924 {
925 	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
926 	const char *name = pci_name(handle->pdev);
927 	int ret;
928 	u32 i;
929 
930 	hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
931 				debugfs_create_dir(name, hns3_dbgfs_root);
932 	handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
933 
934 	for (i = 0; i < HNS3_DBG_DENTRY_COMMON; i++)
935 		hns3_dbg_dentry[i].dentry =
936 			debugfs_create_dir(hns3_dbg_dentry[i].name,
937 					   handle->hnae3_dbgfs);
938 
939 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
940 		if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
941 		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
942 		    (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO &&
943 		     !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)))
944 			continue;
945 
946 		if (!hns3_dbg_cmd[i].init) {
947 			dev_err(&handle->pdev->dev,
948 				"cmd %s lack of init func\n",
949 				hns3_dbg_cmd[i].name);
950 			ret = -EINVAL;
951 			goto out;
952 		}
953 
954 		ret = hns3_dbg_cmd[i].init(handle, i);
955 		if (ret) {
956 			dev_err(&handle->pdev->dev, "failed to init cmd %s\n",
957 				hns3_dbg_cmd[i].name);
958 			goto out;
959 		}
960 	}
961 
962 	return 0;
963 
964 out:
965 	debugfs_remove_recursive(handle->hnae3_dbgfs);
966 	handle->hnae3_dbgfs = NULL;
967 	return ret;
968 }
969 
970 void hns3_dbg_uninit(struct hnae3_handle *handle)
971 {
972 	debugfs_remove_recursive(handle->hnae3_dbgfs);
973 	handle->hnae3_dbgfs = NULL;
974 }
975 
976 void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
977 {
978 	hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
979 }
980 
981 void hns3_dbg_unregister_debugfs(void)
982 {
983 	debugfs_remove_recursive(hns3_dbgfs_root);
984 	hns3_dbgfs_root = NULL;
985 }
986