1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
3
4 #include "reporter_vnic.h"
5 #include "en_stats.h"
6 #include "devlink.h"
7
/* Extract a 64-bit counter from the vport_env section of a cached
 * QUERY_VNIC_ENV command output. The 32-bit sibling, VNIC_ENV_GET(),
 * is expected to come from reporter_vnic.h — confirm against the header.
 */
#define VNIC_ENV_GET64(vnic_env_stats, c) \
	MLX5_GET64(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
		   vport_env.c)

/* Raw firmware output of QUERY_VNIC_ENV, kept in big-endian wire format.
 * Individual fields are decoded on demand via VNIC_ENV_GET/VNIC_ENV_GET64
 * rather than being parsed into host-endian members.
 */
struct mlx5_vnic_diag_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
15
/* Report the current ICM (Interconnect Context Memory) consumption of a
 * vHCA into @fmsg. When @other_vport is set, @vport_num is first resolved
 * to its vhca_id and queried on that function's behalf; otherwise the
 * local function is queried. Errors are logged and the counter is simply
 * omitted from the diagnose output.
 */
static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
						    struct devlink_fmsg *fmsg,
						    u16 vport_num, bool other_vport)
{
	u32 icm_out[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
	u32 icm_in[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
	u32 cap_out[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
	u32 cap_in[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
	u32 cur_alloc_icm;
	int vhca_icm_ctrl;
	u16 vhca_id;
	int err;

	/* Probe nic_cap_reg first: vhca_icm_ctrl tells us whether the
	 * VHCA_ICM_CTRL register is supported at all on this device.
	 */
	err = mlx5_core_access_reg(dev, cap_in, sizeof(cap_in), cap_out,
				   sizeof(cap_out), MLX5_REG_NIC_CAP, 0, 0);
	if (err) {
		mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
		return;
	}

	vhca_icm_ctrl = MLX5_GET(nic_cap_reg, cap_out, vhca_icm_ctrl);
	if (!vhca_icm_ctrl)
		return;

	MLX5_SET(vhca_icm_ctrl_reg, icm_in, vhca_id_valid, other_vport);
	if (other_vport) {
		/* Query on behalf of another function: address it by vhca_id. */
		err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
		if (err) {
			mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
				       vport_num, err);
			return;
		}
		MLX5_SET(vhca_icm_ctrl_reg, icm_in, vhca_id, vhca_id);
	}

	err = mlx5_core_access_reg(dev, icm_in, sizeof(icm_in),
				   icm_out, sizeof(icm_out),
				   MLX5_REG_VHCA_ICM_CTRL, 0, 0);
	if (err) {
		mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
		return;
	}

	cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, icm_out, cur_alloc_icm);
	devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
}
59
/* Dump the vNIC environment counters of a vport into @fmsg.
 *
 * Issues QUERY_VNIC_ENV for @vport_num (on behalf of another function when
 * @other_vport is set) and emits one fmsg pair per counter whose backing
 * capability bit is reported by the device. Capability gating is required:
 * fields absent from the firmware's vnic_env layout read as garbage.
 *
 * Fix vs. original: the mlx5_cmd_exec_inout() status was ignored and @vnic
 * was left uninitialized, so a failed firmware command formatted stack
 * garbage into the diagnose output. Now the struct is zero-initialized and
 * we warn and bail before emitting anything if the query fails.
 */
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
					  struct devlink_fmsg *fmsg,
					  u16 vport_num, bool other_vport)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_vnic_diag_stats vnic = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
	MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport);

	err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
	if (err) {
		mlx5_core_warn(dev, "query_vnic_env failed. vport_num = %d, err = %d\n",
			       vport_num, err);
		return;
	}

	devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
	devlink_fmsg_obj_nest_start(fmsg);

	if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
		devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
					  VNIC_ENV_GET(&vnic, total_error_queues));
		devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
					  VNIC_ENV_GET(&vnic, send_queue_priority_update_flow));
	}
	if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
		devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
					  VNIC_ENV_GET(&vnic, comp_eq_overrun));
		devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
					  VNIC_ENV_GET(&vnic, async_eq_overrun));
	}
	if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun))
		devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
					  VNIC_ENV_GET(&vnic, cq_overrun));
	if (MLX5_CAP_GEN(dev, invalid_command_count))
		devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
					  VNIC_ENV_GET(&vnic, invalid_command));
	if (MLX5_CAP_GEN(dev, quota_exceeded_count))
		devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
					  VNIC_ENV_GET(&vnic, quota_exceeded_command));
	if (MLX5_CAP_GEN(dev, nic_receive_steering_discard))
		devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
					  VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
	if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
		devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
					  VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
		devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
					  VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
	}
	if (MLX5_CAP_GEN(dev, nic_cap_reg))
		mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);

	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
112
mlx5_reporter_vnic_diagnose(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,struct netlink_ext_ack * extack)113 static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
114 struct devlink_fmsg *fmsg,
115 struct netlink_ext_ack *extack)
116 {
117 struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
118
119 mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
120 return 0;
121 }
122
/* Diagnose-only reporter: no .recover/.dump callbacks, so "devlink health
 * diagnose" is the sole supported operation for the "vnic" reporter.
 */
static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = {
	.name = "vnic",
	.diagnose = mlx5_reporter_vnic_diagnose,
};
127
mlx5_reporter_vnic_create(struct mlx5_core_dev * dev)128 void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
129 {
130 struct mlx5_core_health *health = &dev->priv.health;
131 struct devlink *devlink = priv_to_devlink(dev);
132
133 health->vnic_reporter =
134 devlink_health_reporter_create(devlink,
135 &mlx5_reporter_vnic_ops,
136 0, dev);
137 if (IS_ERR(health->vnic_reporter))
138 mlx5_core_warn(dev,
139 "Failed to create vnic reporter, err = %ld\n",
140 PTR_ERR(health->vnic_reporter));
141 }
142
mlx5_reporter_vnic_destroy(struct mlx5_core_dev * dev)143 void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
144 {
145 struct mlx5_core_health *health = &dev->priv.health;
146
147 if (!IS_ERR_OR_NULL(health->vnic_reporter))
148 devlink_health_reporter_destroy(health->vnic_reporter);
149 }
150