Lines Matching +full:cmd +full:- +full:max +full:- +full:name

1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
15 .name = "tm"
18 .name = "tx_bd_info"
21 .name = "rx_bd_info"
24 .name = "mac_list"
27 .name = "reg"
30 .name = "queue"
33 .name = "fd"
37 .name = "common"
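
The ".name" hits above come from a table that names the debugfs sub-directories created for each device (tm, tx_bd_info, rx_bd_info, and so on). A minimal sketch of how such a directory table is typically declared, assuming kernel context; the struct tag and the dentry field are assumptions for illustration, only the name strings are taken from the hits.

#include <linux/debugfs.h>

/* sketch only: field layout assumed, not copied from the driver */
struct dbg_dentry_info {
	const char *name;	/* sub-directory name under the device's debugfs dir */
	struct dentry *dentry;	/* filled in once debugfs_create_dir() has run */
};

static struct dbg_dentry_info dbg_dentry[] = {
	{ .name = "tm" },
	{ .name = "tx_bd_info" },
	{ .name = "rx_bd_info" },
	{ .name = "mac_list" },
	{ .name = "reg" },
	{ .name = "queue" },
	{ .name = "fd" },
	{ .name = "common" },
};
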
41 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
42 static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
46 .name = "tm_nodes",
47 .cmd = HNAE3_DBG_CMD_TM_NODES,
53 .name = "tm_priority",
54 .cmd = HNAE3_DBG_CMD_TM_PRI,
60 .name = "tm_qset",
61 .cmd = HNAE3_DBG_CMD_TM_QSET,
67 .name = "tm_map",
68 .cmd = HNAE3_DBG_CMD_TM_MAP,
74 .name = "tm_pg",
75 .cmd = HNAE3_DBG_CMD_TM_PG,
81 .name = "tm_port",
82 .cmd = HNAE3_DBG_CMD_TM_PORT,
88 .name = "tc_sch_info",
89 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
95 .name = "qos_pause_cfg",
96 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
102 .name = "qos_pri_map",
103 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
109 .name = "qos_dscp_map",
110 .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
116 .name = "qos_buf_cfg",
117 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
123 .name = "dev_info",
124 .cmd = HNAE3_DBG_CMD_DEV_INFO,
130 .name = "tx_bd_queue",
131 .cmd = HNAE3_DBG_CMD_TX_BD,
137 .name = "rx_bd_queue",
138 .cmd = HNAE3_DBG_CMD_RX_BD,
144 .name = "uc",
145 .cmd = HNAE3_DBG_CMD_MAC_UC,
151 .name = "mc",
152 .cmd = HNAE3_DBG_CMD_MAC_MC,
158 .name = "mng_tbl",
159 .cmd = HNAE3_DBG_CMD_MNG_TBL,
165 .name = "loopback",
166 .cmd = HNAE3_DBG_CMD_LOOPBACK,
172 .name = "interrupt_info",
173 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
179 .name = "reset_info",
180 .cmd = HNAE3_DBG_CMD_RESET_INFO,
186 .name = "imp_info",
187 .cmd = HNAE3_DBG_CMD_IMP_INFO,
193 .name = "ncl_config",
194 .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
200 .name = "mac_tnl_status",
201 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
207 .name = "bios_common",
208 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
214 .name = "ssu",
215 .cmd = HNAE3_DBG_CMD_REG_SSU,
221 .name = "igu_egu",
222 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
228 .name = "rpu",
229 .cmd = HNAE3_DBG_CMD_REG_RPU,
235 .name = "ncsi",
236 .cmd = HNAE3_DBG_CMD_REG_NCSI,
242 .name = "rtc",
243 .cmd = HNAE3_DBG_CMD_REG_RTC,
249 .name = "ppp",
250 .cmd = HNAE3_DBG_CMD_REG_PPP,
256 .name = "rcb",
257 .cmd = HNAE3_DBG_CMD_REG_RCB,
263 .name = "tqp",
264 .cmd = HNAE3_DBG_CMD_REG_TQP,
270 .name = "mac",
271 .cmd = HNAE3_DBG_CMD_REG_MAC,
277 .name = "dcb",
278 .cmd = HNAE3_DBG_CMD_REG_DCB,
284 .name = "queue_map",
285 .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
291 .name = "rx_queue_info",
292 .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
298 .name = "tx_queue_info",
299 .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
305 .name = "fd_tcam",
306 .cmd = HNAE3_DBG_CMD_FD_TCAM,
312 .name = "service_task_info",
313 .cmd = HNAE3_DBG_CMD_SERV_INFO,
319 .name = "vlan_config",
320 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
326 .name = "ptp_info",
327 .cmd = HNAE3_DBG_CMD_PTP_INFO,
333 .name = "fd_counter",
334 .cmd = HNAE3_DBG_CMD_FD_COUNTER,
340 .name = "umv_info",
341 .cmd = HNAE3_DBG_CMD_UMV_INFO,
347 .name = "page_pool_info",
348 .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
354 .name = "coalesce_info",
355 .cmd = HNAE3_DBG_CMD_COAL_INFO,
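
Each ".name"/".cmd" pair above is one entry of the command table that backs a debugfs file. A hedged sketch of what a complete entry plausibly looks like follows; the dentry index, buffer length and init callback fields are assumptions inferred from how hns3_dbg_cmd[] is used further down (hns3_dbg_bd_file_init()/hns3_dbg_common_file_init()), not copied from the file.

/* sketch only: everything beyond .name and .cmd is assumed */
struct dbg_cmd_info {
	const char *name;	/* debugfs file name, e.g. "tm_nodes" */
	u32 cmd;		/* HNAE3_DBG_CMD_* opcode handed to the dump hook */
	u32 dentry;		/* index into the directory table sketched above */
	u32 buf_len;		/* size of the cached dump buffer */
	int (*init)(struct hnae3_handle *handle, u32 cmd); /* file-creation hook */
};

static const struct dbg_cmd_info example_cmd = {
	.name	 = "tm_nodes",
	.cmd	 = HNAE3_DBG_CMD_TM_NODES,
	.dentry	 = 0,		/* the "tm" directory */
	.buf_len = 4096,	/* illustrative value */
	.init	 = hns3_dbg_common_file_init,
};
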
364 .name = "support FD",
367 .name = "support GRO",
370 .name = "support FEC",
373 .name = "support UDP GSO",
376 .name = "support PTP",
379 .name = "support INT QL",
382 .name = "support HW TX csum",
385 .name = "support UDP tunnel csum",
388 .name = "support TX push",
391 .name = "support imp-controlled PHY",
394 .name = "support imp-controlled RAS",
397 .name = "support rxd advanced layout",
400 .name = "support port vlan bypass",
403 .name = "support modify vlan filter state",
406 .name = "support FEC statistics",
409 .name = "support lane num",
412 .name = "support wake on lan",
415 .name = "support tm flush",
418 .name = "support vf fault detect",
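
"support FD", "support GRO" and the other strings above are the human-readable labels of a capability table; hns3_dbg_dev_caps() further down walks it and reports each capability via test_bit() on ae_dev->caps. A minimal sketch, with the bit field and its values assumed (the placeholder 0/1 are not the real bit numbers):

/* sketch only: cap_bit field name and values are assumptions */
struct dbg_cap_info {
	const char *name;	/* label printed by the dev_info dump */
	unsigned int cap_bit;	/* bit tested in ae_dev->caps */
};

static const struct dbg_cap_info dbg_cap[] = {
	{ .name = "support FD",  .cap_bit = 0 /* e.g. HNAE3_DEV_SUPPORT_FD_B, assumed */ },
	{ .name = "support GRO", .cap_bit = 1 /* e.g. HNAE3_DEV_SUPPORT_GRO_B, assumed */ },
	/* ... one entry per "support ..." string listed above ... */
};
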
460 len -= HNS3_DBG_LINE_END_LEN; in hns3_dbg_fill_content()
463 item_len = strlen(items[i].name) + items[i].interval; in hns3_dbg_fill_content()
472 memcpy(pos, items[i].name, strlen(items[i].name)); in hns3_dbg_fill_content()
475 len -= item_len; in hns3_dbg_fill_content()
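
hns3_dbg_fill_content() is the column formatter used by most dumps below: each item carries a header name plus an interval (extra spacing), and the function copies either the header names or the per-column result strings into a space-padded line buffer, reserving room for the "\n\0" terminator up front. A self-contained sketch of that pattern, under assumed type and constant names; not the driver's exact function.

#include <linux/string.h>
#include <linux/types.h>

#define DBG_LINE_END_LEN 2	/* "\n" + "\0" (assumed constant name) */

struct dbg_item {
	char name[24];		/* column header */
	u16 interval;		/* spacing appended after the column */
};

static void dbg_fill_content(char *content, u16 len,
			     const struct dbg_item *items,
			     const char **result, u16 size)
{
	char *pos = content;
	u16 item_len;
	u16 i;

	if (len < DBG_LINE_END_LEN)
		return;

	memset(content, ' ', len);
	len -= DBG_LINE_END_LEN;

	for (i = 0; i < size; i++) {
		item_len = strlen(items[i].name) + items[i].interval;
		if (len < item_len)
			break;

		if (result) {
			/* data row: copy the formatted value, guard the column width */
			if (item_len < strlen(result[i]))
				break;
			memcpy(pos, result[i], strlen(result[i]));
		} else {
			/* header row: copy the column name */
			memcpy(pos, items[i].name, strlen(items[i].name));
		}

		pos += item_len;
		len -= item_len;
	}

	*pos++ = '\n';
	*pos = '\0';
}
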
492 coal = &tqp_vector->tx_group.coal; in hns3_get_coal_info()
493 dim = &tqp_vector->tx_group.dim; in hns3_get_coal_info()
496 ql_enable = tqp_vector->tx_group.coal.ql_enable; in hns3_get_coal_info()
498 coal = &tqp_vector->rx_group.coal; in hns3_get_coal_info()
499 dim = &tqp_vector->rx_group.dim; in hns3_get_coal_info()
502 ql_enable = tqp_vector->rx_group.coal.ql_enable; in hns3_get_coal_info()
506 sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ? in hns3_get_coal_info()
507 dim_state_str[dim->state] : "unknown"); in hns3_get_coal_info()
508 sprintf(result[j++], "%u", dim->profile_ix); in hns3_get_coal_info()
509 sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ? in hns3_get_coal_info()
510 dim_cqe_mode_str[dim->mode] : "unknown"); in hns3_get_coal_info()
512 dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ? in hns3_get_coal_info()
513 dim_tune_stat_str[dim->tune_state] : "unknown"); in hns3_get_coal_info()
514 sprintf(result[j++], "%u", dim->steps_left); in hns3_get_coal_info()
515 sprintf(result[j++], "%u", dim->steps_right); in hns3_get_coal_info()
516 sprintf(result[j++], "%u", dim->tired); in hns3_get_coal_info()
517 sprintf(result[j++], "%u", coal->int_gl); in hns3_get_coal_info()
518 sprintf(result[j++], "%u", coal->int_ql); in hns3_get_coal_info()
519 reg_val = readl(tqp_vector->mask_addr + gl_offset) & in hns3_get_coal_info()
523 reg_val = readl(tqp_vector->mask_addr + ql_offset) & in hns3_get_coal_info()
537 struct hns3_nic_priv *priv = h->priv; in hns3_dump_coal_info()
544 *pos += scnprintf(buf + *pos, len - *pos, in hns3_dump_coal_info()
549 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); in hns3_dump_coal_info()
551 for (i = 0; i < priv->vector_num; i++) { in hns3_dump_coal_info()
552 tqp_vector = &priv->tqp_vector[i]; in hns3_dump_coal_info()
557 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); in hns3_dump_coal_info()
566 pos += scnprintf(buf + pos, len - pos, "\n"); in hns3_dbg_coal_info()
586 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_dbg_tx_spare_info()
592 *pos += scnprintf(buf + *pos, len - *pos, in hns3_dbg_tx_spare_info()
600 *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n"); in hns3_dbg_tx_spare_info()
603 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); in hns3_dbg_tx_spare_info()
608 sprintf(result[j++], "%u", ring->tx_copybreak); in hns3_dbg_tx_spare_info()
609 sprintf(result[j++], "%u", tx_spare->len); in hns3_dbg_tx_spare_info()
610 sprintf(result[j++], "%u", tx_spare->next_to_use); in hns3_dbg_tx_spare_info()
611 sprintf(result[j++], "%u", tx_spare->next_to_clean); in hns3_dbg_tx_spare_info()
612 sprintf(result[j++], "%u", tx_spare->last_to_clean); in hns3_dbg_tx_spare_info()
613 sprintf(result[j++], "%pad", &tx_spare->dma); in hns3_dbg_tx_spare_info()
618 *pos += scnprintf(buf + *pos, len - *pos, "%s", content); in hns3_dbg_tx_spare_info()
645 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
648 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
651 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
654 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
657 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
660 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
662 sprintf(result[j++], "%u", ring->rx_copybreak); in hns3_dump_rx_queue_info()
664 sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
668 sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
673 base_add_h = readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
675 base_add_l = readl_relaxed(ring->tqp->io_base + in hns3_dump_rx_queue_info()
684 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); in hns3_dbg_rx_queue_info()
686 struct hns3_nic_priv *priv = h->priv; in hns3_dbg_rx_queue_info()
692 if (!priv->ring) { in hns3_dbg_rx_queue_info()
693 dev_err(&h->pdev->dev, "priv->ring is NULL\n"); in hns3_dbg_rx_queue_info()
694 return -EFAULT; in hns3_dbg_rx_queue_info()
702 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_rx_queue_info()
703 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_dbg_rx_queue_info()
708 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || in hns3_dbg_rx_queue_info()
709 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_dbg_rx_queue_info()
710 return -EPERM; in hns3_dbg_rx_queue_info()
712 ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; in hns3_dbg_rx_queue_info()
718 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_rx_queue_info()
746 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
749 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
752 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
755 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
758 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
761 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
764 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
767 sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
771 sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
776 base_add_h = readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
778 base_add_l = readl_relaxed(ring->tqp->io_base + in hns3_dump_tx_queue_info()
787 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); in hns3_dbg_tx_queue_info()
789 struct hns3_nic_priv *priv = h->priv; in hns3_dbg_tx_queue_info()
795 if (!priv->ring) { in hns3_dbg_tx_queue_info()
796 dev_err(&h->pdev->dev, "priv->ring is NULL\n"); in hns3_dbg_tx_queue_info()
797 return -EFAULT; in hns3_dbg_tx_queue_info()
805 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_tx_queue_info()
807 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_dbg_tx_queue_info()
812 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || in hns3_dbg_tx_queue_info()
813 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_dbg_tx_queue_info()
814 return -EPERM; in hns3_dbg_tx_queue_info()
816 ring = &priv->ring[i]; in hns3_dbg_tx_queue_info()
822 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_tx_queue_info()
825 hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos); in hns3_dbg_tx_queue_info()
840 struct hns3_nic_priv *priv = h->priv; in hns3_dbg_queue_map()
846 if (!h->ae_algo->ops->get_global_queue_id) in hns3_dbg_queue_map()
847 return -EOPNOTSUPP; in hns3_dbg_queue_map()
854 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_queue_map()
855 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_dbg_queue_map()
856 if (!priv->ring || !priv->ring[i].tqp_vector) in hns3_dbg_queue_map()
861 h->ae_algo->ops->get_global_queue_id(h, i)); in hns3_dbg_queue_map()
863 priv->ring[i].tqp_vector->vector_irq); in hns3_dbg_queue_map()
867 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_queue_map()
894 sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); in hns3_dump_rx_bd_info()
895 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len)); in hns3_dump_rx_bd_info()
896 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size)); in hns3_dump_rx_bd_info()
897 sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); in hns3_dump_rx_bd_info()
898 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id)); in hns3_dump_rx_bd_info()
899 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag)); in hns3_dump_rx_bd_info()
900 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); in hns3_dump_rx_bd_info()
901 sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag)); in hns3_dump_rx_bd_info()
902 sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); in hns3_dump_rx_bd_info()
903 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_dump_rx_bd_info()
904 u32 ol_info = le32_to_cpu(desc->rx.ol_info); in hns3_dump_rx_bd_info()
909 sprintf(result[j++], "%7u", le16_to_cpu(desc->csum)); in hns3_dump_rx_bd_info()
919 struct hns3_nic_priv *priv = d->handle->priv; in hns3_dbg_rx_bd_info()
927 if (d->qid >= d->handle->kinfo.num_tqps) { in hns3_dbg_rx_bd_info()
928 dev_err(&d->handle->pdev->dev, in hns3_dbg_rx_bd_info()
929 "queue%u is not in use\n", d->qid); in hns3_dbg_rx_bd_info()
930 return -EINVAL; in hns3_dbg_rx_bd_info()
936 pos += scnprintf(buf + pos, len - pos, in hns3_dbg_rx_bd_info()
937 "Queue %u rx bd info:\n", d->qid); in hns3_dbg_rx_bd_info()
940 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_rx_bd_info()
942 ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps]; in hns3_dbg_rx_bd_info()
943 for (i = 0; i < ring->desc_num; i++) { in hns3_dbg_rx_bd_info()
944 desc = &ring->desc[i]; in hns3_dbg_rx_bd_info()
950 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_rx_bd_info()
975 sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); in hns3_dump_tx_bd_info()
976 sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag)); in hns3_dump_tx_bd_info()
977 sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size)); in hns3_dump_tx_bd_info()
979 le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); in hns3_dump_tx_bd_info()
980 sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag)); in hns3_dump_tx_bd_info()
981 sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); in hns3_dump_tx_bd_info()
983 le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); in hns3_dump_tx_bd_info()
984 sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); in hns3_dump_tx_bd_info()
985 sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); in hns3_dump_tx_bd_info()
986 sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); in hns3_dump_tx_bd_info()
992 struct hns3_nic_priv *priv = d->handle->priv; in hns3_dbg_tx_bd_info()
1000 if (d->qid >= d->handle->kinfo.num_tqps) { in hns3_dbg_tx_bd_info()
1001 dev_err(&d->handle->pdev->dev, in hns3_dbg_tx_bd_info()
1002 "queue%u is not in use\n", d->qid); in hns3_dbg_tx_bd_info()
1003 return -EINVAL; in hns3_dbg_tx_bd_info()
1009 pos += scnprintf(buf + pos, len - pos, in hns3_dbg_tx_bd_info()
1010 "Queue %u tx bd info:\n", d->qid); in hns3_dbg_tx_bd_info()
1013 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_tx_bd_info()
1015 ring = &priv->ring[d->qid]; in hns3_dbg_tx_bd_info()
1016 for (i = 0; i < ring->desc_num; i++) { in hns3_dbg_tx_bd_info()
1017 desc = &ring->desc[i]; in hns3_dbg_tx_bd_info()
1023 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_tx_bd_info()
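
The TX/RX buffer-descriptor dumps above convert every field with le16_to_cpu()/le32_to_cpu()/le64_to_cpu() before formatting it, because the hardware lays descriptors out little-endian regardless of host endianness. A small sketch of the idea, using a hypothetical trimmed descriptor rather than the driver's real layout:

#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical descriptor with only a few fields; layout is illustrative */
struct example_tx_desc {
	__le64 addr;		/* DMA address of the buffer */
	__le16 vlan_tag;
	__le16 send_size;
};

static void dump_example_tx_desc(const struct example_tx_desc *desc,
				 char result[][24])
{
	unsigned int j = 0;

	/* convert from the descriptor's little-endian layout before printing */
	sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
	sprintf(result[j++], "%u", le16_to_cpu(desc->vlan_tag));
	sprintf(result[j++], "%u", le16_to_cpu(desc->send_size));
}
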
1032 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); in hns3_dbg_dev_caps()
1034 unsigned long *caps = ae_dev->caps; in hns3_dbg_dev_caps()
1037 *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n"); in hns3_dbg_dev_caps()
1041 *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n", in hns3_dbg_dev_caps()
1042 hns3_dbg_cap[i].name, str[state]); in hns3_dbg_dev_caps()
1045 *pos += scnprintf(buf + *pos, len - *pos, "\n"); in hns3_dbg_dev_caps()
1051 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); in hns3_dbg_dev_specs()
1052 struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs; in hns3_dbg_dev_specs()
1053 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_dbg_dev_specs()
1054 struct net_device *dev = kinfo->netdev; in hns3_dbg_dev_specs()
1056 *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n"); in hns3_dbg_dev_specs()
1057 *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n", in hns3_dbg_dev_specs()
1058 dev_specs->mac_entry_num); in hns3_dbg_dev_specs()
1059 *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n", in hns3_dbg_dev_specs()
1060 dev_specs->mng_entry_num); in hns3_dbg_dev_specs()
1061 *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n", in hns3_dbg_dev_specs()
1062 dev_specs->max_non_tso_bd_num); in hns3_dbg_dev_specs()
1063 *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n", in hns3_dbg_dev_specs()
1064 dev_specs->rss_ind_tbl_size); in hns3_dbg_dev_specs()
1065 *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n", in hns3_dbg_dev_specs()
1066 dev_specs->rss_key_size); in hns3_dbg_dev_specs()
1067 *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n", in hns3_dbg_dev_specs()
1068 kinfo->rss_size); in hns3_dbg_dev_specs()
1069 *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n", in hns3_dbg_dev_specs()
1070 kinfo->req_rss_size); in hns3_dbg_dev_specs()
1071 *pos += scnprintf(buf + *pos, len - *pos, in hns3_dbg_dev_specs()
1073 kinfo->num_tqps); in hns3_dbg_dev_specs()
1074 *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n", in hns3_dbg_dev_specs()
1075 kinfo->rx_buf_len); in hns3_dbg_dev_specs()
1076 *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n", in hns3_dbg_dev_specs()
1077 kinfo->num_tx_desc); in hns3_dbg_dev_specs()
1078 *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n", in hns3_dbg_dev_specs()
1079 kinfo->num_rx_desc); in hns3_dbg_dev_specs()
1080 *pos += scnprintf(buf + *pos, len - *pos, in hns3_dbg_dev_specs()
1082 kinfo->tc_info.num_tc); in hns3_dbg_dev_specs()
1083 *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n", in hns3_dbg_dev_specs()
1084 dev_specs->int_ql_max); in hns3_dbg_dev_specs()
1085 *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n", in hns3_dbg_dev_specs()
1086 dev_specs->max_int_gl); in hns3_dbg_dev_specs()
1087 *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n", in hns3_dbg_dev_specs()
1088 dev_specs->max_tm_rate); in hns3_dbg_dev_specs()
1089 *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", in hns3_dbg_dev_specs()
1090 dev_specs->max_qset_num); in hns3_dbg_dev_specs()
1091 *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", in hns3_dbg_dev_specs()
1092 dev_specs->umv_size); in hns3_dbg_dev_specs()
1093 *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", in hns3_dbg_dev_specs()
1094 dev_specs->mc_mac_size); in hns3_dbg_dev_specs()
1095 *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", in hns3_dbg_dev_specs()
1096 dev_specs->mac_stats_num); in hns3_dbg_dev_specs()
1097 *pos += scnprintf(buf + *pos, len - *pos, in hns3_dbg_dev_specs()
1099 dev->watchdog_timeo / HZ); in hns3_dbg_dev_specs()
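
hns3_dbg_dev_specs() and the other dump routines append to the output buffer with the same idiom: scnprintf() never writes past the remaining space and returns the number of characters actually stored (excluding the trailing NUL), so adding its return value to a running position stays safe even once the buffer is full. A minimal sketch of the pattern with a hypothetical helper:

#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical helper showing the "*pos += scnprintf(...)" accumulation */
static int dbg_append_u32(char *buf, int len, int pos, const char *label, u32 val)
{
	/* scnprintf() truncates safely; len - pos may legitimately reach 0 */
	return pos + scnprintf(buf + pos, len - pos, "%s: %u\n", label, val);
}

/* usage: pos = dbg_append_u32(buf, len, pos, "RSS key size", rss_key_size); */
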
1130 READ_ONCE(ring->page_pool->pages_state_hold_cnt)); in hns3_dump_page_pool_info()
1132 atomic_read(&ring->page_pool->pages_state_release_cnt)); in hns3_dump_page_pool_info()
1133 sprintf(result[j++], "%u", ring->page_pool->p.pool_size); in hns3_dump_page_pool_info()
1134 sprintf(result[j++], "%u", ring->page_pool->p.order); in hns3_dump_page_pool_info()
1135 sprintf(result[j++], "%d", ring->page_pool->p.nid); in hns3_dump_page_pool_info()
1136 sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); in hns3_dump_page_pool_info()
1144 struct hns3_nic_priv *priv = h->priv; in hns3_dbg_page_pool_info()
1150 if (!priv->ring) { in hns3_dbg_page_pool_info()
1151 dev_err(&h->pdev->dev, "priv->ring is NULL\n"); in hns3_dbg_page_pool_info()
1152 return -EFAULT; in hns3_dbg_page_pool_info()
1155 if (!priv->ring[h->kinfo.num_tqps].page_pool) { in hns3_dbg_page_pool_info()
1156 dev_err(&h->pdev->dev, "page pool is not initialized\n"); in hns3_dbg_page_pool_info()
1157 return -EFAULT; in hns3_dbg_page_pool_info()
1165 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_page_pool_info()
1166 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_dbg_page_pool_info()
1167 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || in hns3_dbg_page_pool_info()
1168 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_dbg_page_pool_info()
1169 return -EPERM; in hns3_dbg_page_pool_info()
1170 ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; in hns3_dbg_page_pool_info()
1176 pos += scnprintf(buf + pos, len - pos, "%s", content); in hns3_dbg_page_pool_info()
1187 if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { in hns3_dbg_get_cmd_index()
1193 dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", in hns3_dbg_get_cmd_index()
1194 dbg_data->cmd); in hns3_dbg_get_cmd_index()
1195 return -EINVAL; in hns3_dbg_get_cmd_index()
1200 .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
1204 .cmd = HNAE3_DBG_CMD_DEV_INFO,
1208 .cmd = HNAE3_DBG_CMD_TX_BD,
1212 .cmd = HNAE3_DBG_CMD_RX_BD,
1216 .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
1220 .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
1224 .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
1228 .cmd = HNAE3_DBG_CMD_COAL_INFO,
1234 enum hnae3_dbg_cmd cmd, char *buf, int len) in hns3_dbg_read_cmd() argument
1236 const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops; in hns3_dbg_read_cmd()
1241 if (cmd == hns3_dbg_cmd_func[i].cmd) { in hns3_dbg_read_cmd()
1243 if (cmd_func->dbg_dump) in hns3_dbg_read_cmd()
1244 return cmd_func->dbg_dump(dbg_data->handle, buf, in hns3_dbg_read_cmd()
1247 return cmd_func->dbg_dump_bd(dbg_data, buf, in hns3_dbg_read_cmd()
1252 if (!ops->dbg_read_cmd) in hns3_dbg_read_cmd()
1253 return -EOPNOTSUPP; in hns3_dbg_read_cmd()
1255 return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len); in hns3_dbg_read_cmd()
1261 struct hns3_dbg_data *dbg_data = filp->private_data; in hns3_dbg_read()
1262 struct hnae3_handle *handle = dbg_data->handle; in hns3_dbg_read()
1263 struct hns3_nic_priv *priv = handle->priv; in hns3_dbg_read()
1274 mutex_lock(&handle->dbgfs_lock); in hns3_dbg_read()
1275 save_buf = &handle->dbgfs_buf[index]; in hns3_dbg_read()
1277 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || in hns3_dbg_read()
1278 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { in hns3_dbg_read()
1279 ret = -EBUSY; in hns3_dbg_read()
1288 ret = -ENOMEM; in hns3_dbg_read()
1296 ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, in hns3_dbg_read()
1305 mutex_unlock(&handle->dbgfs_lock); in hns3_dbg_read()
1316 mutex_unlock(&handle->dbgfs_lock); in hns3_dbg_read()
1326 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) in hns3_dbg_bd_file_init() argument
1333 entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; in hns3_dbg_bd_file_init()
1335 data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data), in hns3_dbg_bd_file_init()
1338 return -ENOMEM; in hns3_dbg_bd_file_init()
1341 char name[HNS3_DBG_FILE_NAME_LEN]; in hns3_dbg_bd_file_init() local
1344 data[i].cmd = hns3_dbg_cmd[cmd].cmd; in hns3_dbg_bd_file_init()
1346 sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); in hns3_dbg_bd_file_init()
1347 debugfs_create_file(name, 0400, entry_dir, &data[i], in hns3_dbg_bd_file_init()
1355 hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) in hns3_dbg_common_file_init() argument
1360 data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL); in hns3_dbg_common_file_init()
1362 return -ENOMEM; in hns3_dbg_common_file_init()
1364 data->handle = handle; in hns3_dbg_common_file_init()
1365 data->cmd = hns3_dbg_cmd[cmd].cmd; in hns3_dbg_common_file_init()
1366 entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; in hns3_dbg_common_file_init()
1367 debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, in hns3_dbg_common_file_init()
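
Both file-init helpers end in debugfs_create_file(), which binds a private-data pointer and a file_operations table to a root-readable (0400) file inside the per-command directory. A minimal, self-contained sketch of that call pattern; the fops table and file name here are placeholders, not the driver's own.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static ssize_t dbg_example_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	static const char msg[] = "example\n";

	return simple_read_from_buffer(buffer, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations dbg_example_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,	/* stores inode->i_private in file->private_data */
	.read  = dbg_example_read,
};

static void dbg_create_example_file(struct dentry *entry_dir, void *data)
{
	/* 0400: readable by root only, matching the fragments above */
	debugfs_create_file("example", 0400, entry_dir, data, &dbg_example_fops);
}
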
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hns3_dbg_init()
1376 const char *name = pci_name(handle->pdev); in hns3_dbg_init() local
1380 handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, in hns3_dbg_init()
1382 sizeof(*handle->dbgfs_buf), in hns3_dbg_init()
1384 if (!handle->dbgfs_buf) in hns3_dbg_init()
1385 return -ENOMEM; in hns3_dbg_init()
1388 debugfs_create_dir(name, hns3_dbgfs_root); in hns3_dbg_init()
1389 handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; in hns3_dbg_init()
1393 debugfs_create_dir(hns3_dbg_dentry[i].name, in hns3_dbg_init()
1394 handle->hnae3_dbgfs); in hns3_dbg_init()
1396 mutex_init(&handle->dbgfs_lock); in hns3_dbg_init()
1399 if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES && in hns3_dbg_init()
1400 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) || in hns3_dbg_init()
1401 (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO && in hns3_dbg_init()
1402 !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))) in hns3_dbg_init()
1406 dev_err(&handle->pdev->dev, in hns3_dbg_init()
1407 "cmd %s lack of init func\n", in hns3_dbg_init()
1408 hns3_dbg_cmd[i].name); in hns3_dbg_init()
1409 ret = -EINVAL; in hns3_dbg_init()
1415 dev_err(&handle->pdev->dev, "failed to init cmd %s\n", in hns3_dbg_init()
1416 hns3_dbg_cmd[i].name); in hns3_dbg_init()
1424 debugfs_remove_recursive(handle->hnae3_dbgfs); in hns3_dbg_init()
1425 handle->hnae3_dbgfs = NULL; in hns3_dbg_init()
1426 mutex_destroy(&handle->dbgfs_lock); in hns3_dbg_init()
1434 debugfs_remove_recursive(handle->hnae3_dbgfs); in hns3_dbg_uninit()
1435 handle->hnae3_dbgfs = NULL; in hns3_dbg_uninit()
1438 if (handle->dbgfs_buf[i]) { in hns3_dbg_uninit()
1439 kvfree(handle->dbgfs_buf[i]); in hns3_dbg_uninit()
1440 handle->dbgfs_buf[i] = NULL; in hns3_dbg_uninit()
1443 mutex_destroy(&handle->dbgfs_lock); in hns3_dbg_uninit()