1 /* 2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include "lib/events.h" 34 #include "en.h" 35 #include "en_accel/ktls.h" 36 #include "en_accel/en_accel.h" 37 #include "en/ptp.h" 38 #include "en/port.h" 39 40 #include <net/page_pool/helpers.h> 41 42 void mlx5e_ethtool_put_stat(u64 **data, u64 val) 43 { 44 *(*data)++ = val; 45 } 46 47 static unsigned int stats_grps_num(struct mlx5e_priv *priv) 48 { 49 return !priv->profile->stats_grps_num ? 
0 : 50 priv->profile->stats_grps_num(priv); 51 } 52 53 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv) 54 { 55 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 56 const unsigned int num_stats_grps = stats_grps_num(priv); 57 unsigned int total = 0; 58 int i; 59 60 for (i = 0; i < num_stats_grps; i++) 61 total += stats_grps[i]->get_num_stats(priv); 62 63 return total; 64 } 65 66 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv) 67 { 68 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 69 const unsigned int num_stats_grps = stats_grps_num(priv); 70 int i; 71 72 for (i = num_stats_grps - 1; i >= 0; i--) 73 if (stats_grps[i]->update_stats && 74 stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS) 75 stats_grps[i]->update_stats(priv); 76 } 77 78 void mlx5e_stats_update(struct mlx5e_priv *priv) 79 { 80 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 81 const unsigned int num_stats_grps = stats_grps_num(priv); 82 int i; 83 84 for (i = num_stats_grps - 1; i >= 0; i--) 85 if (stats_grps[i]->update_stats) 86 stats_grps[i]->update_stats(priv); 87 } 88 89 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx) 90 { 91 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 92 const unsigned int num_stats_grps = stats_grps_num(priv); 93 int i; 94 95 for (i = 0; i < num_stats_grps; i++) 96 stats_grps[i]->fill_stats(priv, &data); 97 } 98 99 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data) 100 { 101 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 102 const unsigned int num_stats_grps = stats_grps_num(priv); 103 int i; 104 105 for (i = 0; i < num_stats_grps; i++) 106 stats_grps[i]->fill_strings(priv, &data); 107 } 108 109 /* Concrete NIC Stats */ 110 111 static const struct counter_desc sw_stats_desc[] = { 112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, 113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, 114 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) 
}, 115 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, 116 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) }, 117 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) }, 118 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, 119 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, 120 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, 121 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, 122 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) }, 123 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) }, 124 125 #ifdef CONFIG_MLX5_EN_TLS 126 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) }, 127 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, 128 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, 129 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, 130 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, 131 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, 132 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) }, 133 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) }, 134 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) }, 135 #endif 136 137 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, 138 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, 139 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) }, 140 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) }, 141 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) }, 142 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, 143 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) }, 144 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) }, 145 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) }, 146 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) 
}, 147 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, 148 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, 149 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, 150 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, 151 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, 152 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, 153 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, 154 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, 155 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, 156 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, 157 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, 158 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, 159 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, 160 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) }, 161 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, 162 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, 163 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, 164 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, 165 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, 166 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, 167 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, 168 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, 169 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, 170 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, 171 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, 172 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, 173 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, 174 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, 175 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, 176 { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, 177 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) }, 178 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, 179 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, 180 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, 181 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, 182 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, 183 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, 184 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) }, 185 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, 186 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, 187 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, 188 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, 189 #ifdef CONFIG_MLX5_EN_ARFS 190 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) }, 191 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) }, 192 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) }, 193 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) }, 194 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, 195 #endif 196 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, 197 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) }, 198 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) }, 199 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) }, 200 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) }, 201 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) }, 202 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) }, 203 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) }, 204 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) }, 205 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) }, 206 { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) }, 207 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) }, 208 #ifdef CONFIG_MLX5_EN_TLS 209 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, 210 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, 211 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) }, 212 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) }, 213 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, 214 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, 215 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, 216 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) }, 217 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, 218 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, 219 #endif 220 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, 221 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, 222 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, 223 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, 224 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) }, 225 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, 226 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) }, 227 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) }, 228 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) }, 229 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) }, 230 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) }, 231 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) }, 232 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) }, 233 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) }, 234 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) }, 235 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, 
rx_xsk_xdp_redirect) }, 236 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) }, 237 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) }, 238 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) }, 239 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) }, 240 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) }, 241 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) }, 242 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) }, 243 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) }, 244 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) }, 245 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) }, 246 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) }, 247 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) }, 248 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) }, 249 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) }, 250 }; 251 252 #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) 253 254 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw) 255 { 256 return NUM_SW_COUNTERS; 257 } 258 259 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw) 260 { 261 int i; 262 263 for (i = 0; i < NUM_SW_COUNTERS; i++) 264 ethtool_puts(data, sw_stats_desc[i].format); 265 } 266 267 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) 268 { 269 int i; 270 271 for (i = 0; i < NUM_SW_COUNTERS; i++) 272 mlx5e_ethtool_put_stat(data, 273 MLX5E_READ_CTR64_CPU(&priv->stats.sw, 274 sw_stats_desc, i)); 275 } 276 277 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s, 278 struct mlx5e_xdpsq_stats *xdpsq_red_stats) 279 { 280 s->tx_xdp_xmit += xdpsq_red_stats->xmit; 281 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; 282 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; 283 s->tx_xdp_nops += xdpsq_red_stats->nops; 284 s->tx_xdp_full += xdpsq_red_stats->full; 285 s->tx_xdp_err += xdpsq_red_stats->err; 286 
s->tx_xdp_cqes += xdpsq_red_stats->cqes; 287 } 288 289 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s, 290 struct mlx5e_xdpsq_stats *xdpsq_stats) 291 { 292 s->rx_xdp_tx_xmit += xdpsq_stats->xmit; 293 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; 294 s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; 295 s->rx_xdp_tx_nops += xdpsq_stats->nops; 296 s->rx_xdp_tx_full += xdpsq_stats->full; 297 s->rx_xdp_tx_err += xdpsq_stats->err; 298 s->rx_xdp_tx_cqe += xdpsq_stats->cqes; 299 } 300 301 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s, 302 struct mlx5e_xdpsq_stats *xsksq_stats) 303 { 304 s->tx_xsk_xmit += xsksq_stats->xmit; 305 s->tx_xsk_mpwqe += xsksq_stats->mpwqe; 306 s->tx_xsk_inlnw += xsksq_stats->inlnw; 307 s->tx_xsk_full += xsksq_stats->full; 308 s->tx_xsk_err += xsksq_stats->err; 309 s->tx_xsk_cqes += xsksq_stats->cqes; 310 } 311 312 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s, 313 struct mlx5e_rq_stats *xskrq_stats) 314 { 315 s->rx_xsk_packets += xskrq_stats->packets; 316 s->rx_xsk_bytes += xskrq_stats->bytes; 317 s->rx_xsk_csum_complete += xskrq_stats->csum_complete; 318 s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary; 319 s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; 320 s->rx_xsk_csum_none += xskrq_stats->csum_none; 321 s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; 322 s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; 323 s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; 324 s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; 325 s->rx_xsk_wqe_err += xskrq_stats->wqe_err; 326 s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; 327 s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; 328 s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; 329 s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; 330 s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; 331 
s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; 332 s->rx_xsk_congst_umr += xskrq_stats->congst_umr; 333 } 334 335 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, 336 struct mlx5e_rq_stats *rq_stats) 337 { 338 s->rx_packets += rq_stats->packets; 339 s->rx_bytes += rq_stats->bytes; 340 s->rx_lro_packets += rq_stats->lro_packets; 341 s->rx_lro_bytes += rq_stats->lro_bytes; 342 s->rx_gro_packets += rq_stats->gro_packets; 343 s->rx_gro_bytes += rq_stats->gro_bytes; 344 s->rx_gro_skbs += rq_stats->gro_skbs; 345 s->rx_gro_large_hds += rq_stats->gro_large_hds; 346 s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets; 347 s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes; 348 s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets; 349 s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes; 350 s->rx_ecn_mark += rq_stats->ecn_mark; 351 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; 352 s->rx_csum_none += rq_stats->csum_none; 353 s->rx_csum_complete += rq_stats->csum_complete; 354 s->rx_csum_complete_tail += rq_stats->csum_complete_tail; 355 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; 356 s->rx_csum_unnecessary += rq_stats->csum_unnecessary; 357 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; 358 s->rx_xdp_drop += rq_stats->xdp_drop; 359 s->rx_xdp_redirect += rq_stats->xdp_redirect; 360 s->rx_wqe_err += rq_stats->wqe_err; 361 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; 362 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; 363 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; 364 s->rx_buff_alloc_err += rq_stats->buff_alloc_err; 365 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; 366 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; 367 s->rx_congst_umr += rq_stats->congst_umr; 368 #ifdef CONFIG_MLX5_EN_ARFS 369 s->rx_arfs_add += rq_stats->arfs_add; 370 s->rx_arfs_request_in += rq_stats->arfs_request_in; 371 
s->rx_arfs_request_out += rq_stats->arfs_request_out; 372 s->rx_arfs_expired += rq_stats->arfs_expired; 373 s->rx_arfs_err += rq_stats->arfs_err; 374 #endif 375 s->rx_recover += rq_stats->recover; 376 s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast; 377 s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow; 378 s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty; 379 s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill; 380 s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive; 381 s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order; 382 s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached; 383 s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full; 384 s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring; 385 s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full; 386 s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref; 387 #ifdef CONFIG_MLX5_EN_TLS 388 s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; 389 s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; 390 s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; 391 s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; 392 s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; 393 s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; 394 s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; 395 s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry; 396 s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; 397 s->rx_tls_err += rq_stats->tls_err; 398 #endif 399 } 400 401 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s, 402 struct mlx5e_ch_stats *ch_stats) 403 { 404 s->ch_events += ch_stats->events; 405 s->ch_poll += ch_stats->poll; 406 s->ch_arm += ch_stats->arm; 407 s->ch_aff_change += ch_stats->aff_change; 408 s->ch_force_irq += ch_stats->force_irq; 409 s->ch_eq_rearm += ch_stats->eq_rearm; 410 } 411 412 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s, 413 struct 
mlx5e_sq_stats *sq_stats) 414 { 415 s->tx_packets += sq_stats->packets; 416 s->tx_bytes += sq_stats->bytes; 417 s->tx_tso_packets += sq_stats->tso_packets; 418 s->tx_tso_bytes += sq_stats->tso_bytes; 419 s->tx_tso_inner_packets += sq_stats->tso_inner_packets; 420 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; 421 s->tx_added_vlan_packets += sq_stats->added_vlan_packets; 422 s->tx_nop += sq_stats->nop; 423 s->tx_mpwqe_blks += sq_stats->mpwqe_blks; 424 s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; 425 s->tx_queue_stopped += sq_stats->stopped; 426 s->tx_queue_wake += sq_stats->wake; 427 s->tx_queue_dropped += sq_stats->dropped; 428 s->tx_cqe_err += sq_stats->cqe_err; 429 s->tx_recover += sq_stats->recover; 430 s->tx_xmit_more += sq_stats->xmit_more; 431 s->tx_csum_partial_inner += sq_stats->csum_partial_inner; 432 s->tx_csum_none += sq_stats->csum_none; 433 s->tx_csum_partial += sq_stats->csum_partial; 434 #ifdef CONFIG_MLX5_EN_TLS 435 s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; 436 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; 437 s->tx_tls_ooo += sq_stats->tls_ooo; 438 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; 439 s->tx_tls_dump_packets += sq_stats->tls_dump_packets; 440 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; 441 s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; 442 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; 443 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; 444 #endif 445 s->tx_cqes += sq_stats->cqes; 446 } 447 448 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv, 449 struct mlx5e_sw_stats *s) 450 { 451 int i; 452 453 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) 454 return; 455 456 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch); 457 458 if (priv->tx_ptp_opened) { 459 for (i = 0; i < priv->max_opened_tc; i++) { 460 mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]); 461 462 /* 
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ 463 barrier(); 464 } 465 } 466 if (priv->rx_ptp_opened) { 467 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq); 468 469 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ 470 barrier(); 471 } 472 } 473 474 static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv, 475 struct mlx5e_sw_stats *s) 476 { 477 struct mlx5e_sq_stats **stats; 478 u16 max_qos_sqs; 479 int i; 480 481 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */ 482 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs); 483 stats = READ_ONCE(priv->htb_qos_sq_stats); 484 485 for (i = 0; i < max_qos_sqs; i++) { 486 mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i])); 487 488 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ 489 barrier(); 490 } 491 } 492 493 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) 494 { 495 struct mlx5e_rq_stats *rq_stats = c->rq.stats; 496 struct page_pool *pool = c->rq.page_pool; 497 struct page_pool_stats stats = { 0 }; 498 499 if (!page_pool_get_stats(pool, &stats)) 500 return; 501 502 rq_stats->pp_alloc_fast = stats.alloc_stats.fast; 503 rq_stats->pp_alloc_slow = stats.alloc_stats.slow; 504 rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order; 505 rq_stats->pp_alloc_empty = stats.alloc_stats.empty; 506 rq_stats->pp_alloc_waive = stats.alloc_stats.waive; 507 rq_stats->pp_alloc_refill = stats.alloc_stats.refill; 508 509 rq_stats->pp_recycle_cached = stats.recycle_stats.cached; 510 rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full; 511 rq_stats->pp_recycle_ring = stats.recycle_stats.ring; 512 rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full; 513 rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt; 514 } 515 516 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) 517 { 518 struct mlx5e_sw_stats *s = &priv->stats.sw; 519 int i; 520 521 memset(s, 0, sizeof(*s)); 522 523 for (i = 0; 
i < priv->channels.num; i++) /* for active channels only */ 524 mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]); 525 526 for (i = 0; i < priv->stats_nch; i++) { 527 struct mlx5e_channel_stats *channel_stats = 528 priv->channel_stats[i]; 529 530 int j; 531 532 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq); 533 mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq); 534 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch); 535 /* xdp redirect */ 536 mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq); 537 /* AF_XDP zero-copy */ 538 mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq); 539 mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq); 540 541 for (j = 0; j < priv->max_opened_tc; j++) { 542 mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]); 543 544 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ 545 barrier(); 546 } 547 } 548 mlx5e_stats_grp_sw_update_stats_ptp(priv, s); 549 mlx5e_stats_grp_sw_update_stats_qos(priv, s); 550 } 551 552 static const struct counter_desc q_stats_desc[] = { 553 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, 554 }; 555 556 static const struct counter_desc drop_rq_stats_desc[] = { 557 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) }, 558 }; 559 560 #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) 561 #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) 562 563 static bool q_counter_any(struct mlx5e_priv *priv) 564 { 565 struct mlx5_core_dev *pos; 566 int i; 567 568 mlx5_sd_for_each_dev(i, priv->mdev, pos) 569 if (priv->q_counter[i++]) 570 return true; 571 572 return false; 573 } 574 575 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt) 576 { 577 int num_stats = 0; 578 579 if (q_counter_any(priv)) 580 num_stats += NUM_Q_COUNTERS; 581 582 if (priv->drop_rq_q_counter) 583 num_stats += NUM_DROP_RQ_COUNTERS; 584 585 return num_stats; 586 } 587 588 static 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt) 589 { 590 int i; 591 592 for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) 593 ethtool_puts(data, q_stats_desc[i].format); 594 595 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) 596 ethtool_puts(data, drop_rq_stats_desc[i].format); 597 } 598 599 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) 600 { 601 int i; 602 603 for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) 604 mlx5e_ethtool_put_stat(data, 605 MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, 606 q_stats_desc, i)); 607 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) 608 mlx5e_ethtool_put_stat( 609 data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, 610 drop_rq_stats_desc, i)); 611 } 612 613 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) 614 { 615 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; 616 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; 617 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; 618 struct mlx5_core_dev *pos; 619 u32 rx_out_of_buffer = 0; 620 int ret, i; 621 622 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); 623 624 mlx5_sd_for_each_dev(i, priv->mdev, pos) { 625 if (priv->q_counter[i]) { 626 MLX5_SET(query_q_counter_in, in, counter_set_id, 627 priv->q_counter[i]); 628 ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out); 629 if (!ret) 630 rx_out_of_buffer += MLX5_GET(query_q_counter_out, 631 out, out_of_buffer); 632 } 633 } 634 qcnt->rx_out_of_buffer = rx_out_of_buffer; 635 636 if (priv->drop_rq_q_counter) { 637 MLX5_SET(query_q_counter_in, in, counter_set_id, 638 priv->drop_rq_q_counter); 639 ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); 640 if (!ret) 641 qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, 642 out, out_of_buffer); 643 } 644 } 645 646 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) 647 static const struct counter_desc vnic_env_stats_steer_desc[] = { 648 { "rx_steer_missed_packets", 649 
VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, 650 }; 651 652 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = { 653 { "dev_internal_queue_oob", 654 VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) }, 655 }; 656 657 static const struct counter_desc vnic_env_stats_drop_desc[] = { 658 { "rx_oversize_pkts_buffer", 659 VNIC_ENV_OFF(vport_env.eth_wqe_too_small) }, 660 }; 661 662 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \ 663 (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \ 664 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0) 665 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \ 666 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \ 667 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) 668 #define NUM_VNIC_ENV_DROP_COUNTERS(dev) \ 669 (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \ 670 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0) 671 672 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env) 673 { 674 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + 675 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) + 676 NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); 677 } 678 679 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) 680 { 681 int i; 682 683 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) 684 ethtool_puts(data, vnic_env_stats_steer_desc[i].format); 685 686 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) 687 ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format); 688 689 for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++) 690 ethtool_puts(data, vnic_env_stats_drop_desc[i].format); 691 } 692 693 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) 694 { 695 int i; 696 697 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) 698 mlx5e_ethtool_put_stat( 699 data, 700 MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, 701 vnic_env_stats_steer_desc, i)); 702 703 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) 704 mlx5e_ethtool_put_stat( 705 data, 706 MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, 707 
vnic_env_stats_dev_oob_desc, i));

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_drop_desc, i));
}

/* Refresh the cached QUERY_VNIC_ENV command output. No-op when the group
 * exposes no counters on this device.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

/* Byte offset of counter @c within the QUERY_VPORT_COUNTER output layout */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

static const struct counter_desc vport_loopback_stats_desc[] = {
	{ "vport_loopback_packets",
	  VPORT_COUNTER_OFF(local_loopback.packets) },
	{ "vport_loopback_bytes",
	  VPORT_COUNTER_OFF(local_loopback.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
/* Loopback counters are exposed only when the device advertises the cap */
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS +
	       NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		ethtool_puts(data, vport_stats_desc[i].format);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vport_loopback_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_stats_desc, i));

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_loopback_stats_desc, i));
}

/* Refresh the cached QUERY_VPORT_COUNTER command output from firmware */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

/* IEEE 802.3 PPCNT group: offset of the high dword of 64-bit counter @c */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy",
PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		ethtool_puts(data, pport_802_3_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.IEEE_802_3_counters,
				      pport_802_3_stats_desc, i));
}

/* Basic PPCNT groups are available either implicitly (no PCAM register)
 * or when the PCAM advertises the ppcnt register.
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

/* Read the IEEE 802.3 PPCNT group into the cached pport buffer */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* Read 64-bit big-endian counter field @c of PPCNT counter set @set from
 * raw register output @ptr.
 */
#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
	be64_to_cpu(*(__be64 *)((char *)ptr + \
		MLX5_BYTE_OFF(ppcnt_reg, \
			      counter_set.set.c##_high)))

/* Query the IEEE 802.3 PPCNT group for local port 1. Returns 0 or a
 * negative errno (-EOPNOTSUPP when basic PPCNT is unsupported).
 */
static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

/* Query the per-priority PPCNT group for priority @prio on local port 1;
 * requires the pfcc_mask PCAM feature and stall-detect debug cap.
 */
static int mlx5e_stats_get_per_prio(struct mlx5_core_dev *mdev,
				    u32 *ppcnt_per_prio, int prio)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!(MLX5_CAP_PCAM_FEATURE(mdev, pfcc_mask) &&
	      MLX5_CAP_DEBUG(mdev, stall_detect)))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	MLX5_SET(ppcnt_reg, in, prio_tc, prio);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_per_prio, sz,
				    MLX5_REG_PPCNT, 0, 0);
}

/* ethtool standard pause statistics */
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev
*mdev = priv->mdev;
	u64 ps_stats = 0;
	int prio;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);

	/* Storm events: sum the critical-watermark stall counter over all
	 * priorities (buffer reused for per-prio query output).
	 */
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		if (mlx5e_stats_get_per_prio(mdev, ppcnt_ieee_802_3, prio))
			return;

		ps_stats += MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
						  eth_per_prio_grp_data_layout,
						  device_stall_critical_watermark_cnt);
	}

	pause_stats->tx_pause_storm_events = ps_stats;
}

/* ethtool standard PHY statistics */
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

/* ethtool standard MAC statistics (IEEE 802.3 aMAC attributes) */
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
			      eth_802_3_cntrs_grp_data_layout, \
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}

/* ethtool standard MAC-control statistics */
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

/* RFC 2863 PPCNT group: offset of the high dword of 64-bit counter @c */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		ethtool_puts(data, pport_2863_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2863_counters,
				      pport_2863_stats_desc, i));
}

/* Read the RFC 2863 PPCNT group into the cached pport buffer */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* RFC 2819 PPCNT group: offset of the high dword of 64-bit counter @c */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		ethtool_puts(data, pport_2819_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2819_counters,
				      pport_2819_stats_desc, i));
}

/* Read the RFC 2819 PPCNT group into the cached pport buffer */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* Packet-size buckets matching the RFC 2819 histogram counters above */
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 10239 },
	{}
};

/* ethtool standard RMON statistics */
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32
in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
			      eth_2819_cntrs_grp_data_layout, \
			      name)

	rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
	rmon->fragments = RD(ether_stats_fragments);
	rmon->jabbers = RD(ether_stats_jabbers);

	rmon->hist[0] = RD(ether_stats_pkts64octets);
	rmon->hist[1] = RD(ether_stats_pkts65to127octets);
	rmon->hist[2] = RD(ether_stats_pkts128to255octets);
	rmon->hist[3] = RD(ether_stats_pkts256to511octets);
	rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
	rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

/* ethtool timestamping statistics: aggregate PTP CQ counters over all TCs
 * when a PTP SQ was opened, otherwise count DMA timestamps over all SQs.
 */
void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
			struct ethtool_ts_stats *ts_stats)
{
	int i, j;

	mutex_lock(&priv->state_lock);

	if (priv->tx_ptp_opened) {
		struct mlx5e_ptp *ptp = priv->channels.ptp;

		ts_stats->pkts = 0;
		ts_stats->err = 0;
		ts_stats->lost = 0;

		if (!ptp)
			goto out;

		/* Aggregate stats across all TCs */
		for (i = 0; i < ptp->num_tc; i++) {
			struct mlx5e_ptp_cq_stats *stats =
				ptp->ptpsq[i].cq_stats;

			ts_stats->pkts += stats->cqe;
			ts_stats->err += stats->abort + stats->err_cqe +
					 stats->late_cqe;
			ts_stats->lost += stats->lost_cqe;
		}
	} else {
		/* DMA layer will always successfully timestamp packets. Other
		 * counters do not make sense for this layer.
		 */
		ts_stats->pkts = 0;

		/* Aggregate stats across all SQs */
		for (j = 0; j < priv->channels.num; j++) {
			struct mlx5e_channel *c = priv->channels.c[j];

			for (i = 0; i < c->num_tc; i++) {
				struct mlx5e_sq_stats *stats = c->sq[i].stats;

				ts_stats->pkts += stats->timestamps;
			}
		}
	}

out:
	mutex_unlock(&priv->state_lock);
}

/* Physical-layer PPCNT group: offset of 32-bit counter @c */
#define PPORT_PHY_LAYER_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_cntrs.c)
static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
	{ "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
};

/* Physical-layer statistical group: offset of high dword of 64-bit @c */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

/* Physical-layer recovery group: offset of 32-bit counter @c */
#define PPORT_PHY_RECOVERY_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
static const struct counter_desc
pport_phy_recovery_cntrs_stats_desc[] = {
	{ "total_success_recovery_phy",
	  PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
};

#define NUM_PPORT_PHY_LAYER_COUNTERS \
	ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
	ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)

/* Optional-group counts, each gated on the PCAM feature advertising it */
#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
	 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
	 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
	 NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;

	num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);

	num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);

	num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
		ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
		ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);

	for (i = 0;
	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
	     i++)
		ethtool_puts(data,
			     pport_phy_statistical_err_lanes_stats_desc[i]
			     .format);

	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
		ethtool_puts(data,
pport_phy_recovery_cntrs_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(&priv->stats.pport
						     .phy_counters,
					    pport_phy_layer_cntrs_stats_desc, i));

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(
				&priv->stats.pport.phy_statistical_counters,
				pport_phy_statistical_stats_desc, i));

	for (i = 0;
	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
	     i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(
				&priv->stats.pport.phy_statistical_counters,
				pport_phy_statistical_err_lanes_stats_desc, i));

	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(
				&priv->stats.pport.phy_recovery_counters,
				pport_phy_recovery_cntrs_stats_desc, i));
}

/* Read the physical-layer PPCNT groups (base, statistical, recovery) into
 * the cached pport buffers; optional groups gated on PCAM features.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp,
			 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
				     0);
	}

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
		out = pstats->phy_recovery_counters;
		MLX5_SET(ppcnt_reg, in, grp,
			 MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
				     0);
	}
}

/* ethtool extended link statistics (link_down_events from PPCNT) */
void mlx5e_get_link_ext_stats(struct net_device *dev,
			      struct ethtool_link_ext_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(priv->mdev, in, sz, out,
			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);

	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
					   counter_set.phys_layer_cntrs.link_down_events);
}

/* PMLP width of local port 1; 0 when the register query fails */
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

/* Active FEC mode as a bit index; MLX5E_FEC_NOFEC when the query fails */
static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

/* Fill one lane's firecode-FEC corrected/uncorrectable block counters */
#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

static void
fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
		 u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}

/* RS FEC exposes total corrected/uncorrectable blocks, not per-lane */
static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}

/* Fill FEC block counters from the physical-layer PPCNT group, picking the
 * per-mode layout (RS totals vs per-lane firecode).
 */
static void fec_set_block_stats(struct mlx5e_priv *priv,
				int mode,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}

/* Total corrected bits come from the physical-layer statistical group */
static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
					 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}

/* Number of bins in the PPCNT RS histogram counter set */
#define MLX5_RS_HISTOGRAM_ENTRIES \
	(MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
	 MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))

/* PPHCR active_hist_type values, compared against the active FEC mode */
enum {
	MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
	MLX5E_HISTOGRAM_FEC_LLRS = 2,
	MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
};

/* True when the histogram type reported by PPHCR matches FEC mode @mode */
static bool fec_rs_validate_hist_type(int mode, int hist_type)
{
	switch (mode) {
	case MLX5E_FEC_RS_528_514:
		return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
	case MLX5E_FEC_RS_544_514:
		return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
	case MLX5E_FEC_LLRS_272_257_1:
		return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
	default:
		break;
	}

	return false;
}

/* Read the histogram bin ranges from PPHCR into priv->fec_ranges.
 * Returns the number of valid bins, or 0 on failure/type mismatch.
 */
static u8
fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
			     const struct ethtool_fec_hist_range **ranges)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
	u8 hist_type, num_of_bins;

	memset(priv->fec_ranges, 0,
	       ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
	MLX5_SET(pphcr_reg, in, local_port, 1);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
		return 0;

	hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
	if (!fec_rs_validate_hist_type(mode, hist_type))
		return 0;

	num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
	if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
		return 0;

	for (int i = 0; i < num_of_bins; i++) {
		void *bin_range =
MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);

		priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
						    high_val);
		priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
						   low_val);
	}
	*ranges = priv->fec_ranges;

	return num_of_bins;
}

/* Read the RS-FEC histogram PPCNT group and copy the first @num_of_bins
 * bin sums into @hist.
 */
static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
					u8 num_of_bins,
					struct ethtool_fec_hist *hist)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *rs_histogram_cntrs;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
					  counter_set.rs_histogram_cntrs);
	/* Guaranteed that num_of_bins is less than MLX5E_FEC_RS_HIST_MAX
	 * by fec_rs_histogram_fill_ranges().
	 */
	for (int i = 0; i < num_of_bins; i++)
		hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
						 rs_histogram_cntrs,
						 hist[i]);
}

/* Fill FEC histogram ranges and bin sums for RS/LLRS modes only */
static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
				     struct ethtool_fec_hist *hist)
{
	u8 num_of_bins;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
		num_of_bins =
			fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
		if (num_of_bins)
			return fec_rs_histogram_fill_stats(priv, num_of_bins,
							   hist);
		break;
	default:
		return;
	}
}

/* ethtool --show-fec statistics entry point */
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats,
			 struct ethtool_fec_hist *hist)
{
	int mode = fec_active_mode(priv->mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	if (MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) {
		fec_set_corrected_bits_total(priv, fec_stats);
		fec_set_block_stats(priv, mode, fec_stats);
	}

	if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
		fec_set_histograms_stats(priv, mode, hist);
}

/* Ethernet extended PPCNT group: offset of high dword of 64-bit @c */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i
= 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_BE(
					&priv->stats.pport.eth_ext_counters,
					pport_eth_ext_stats_desc, i));
}

/* Read the Ethernet extended PPCNT group when the device supports it */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* MPCNT PCIe performance group: offset of 32-bit counter @c */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* MPCNT PCIe performance group: offset of high dword of 64-bit @c */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			ethtool_puts(data, pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			ethtool_puts(data, pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			ethtool_puts(data,
				     pcie_perf_stall_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR32_BE(
					&priv->stats.pcie.pcie_perf_counters,
					pcie_perf_stats_desc, i));

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_BE(
					&priv->stats.pcie.pcie_perf_counters,
					pcie_perf_stats_desc64, i));

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR32_BE(
					&priv->stats.pcie.pcie_perf_counters,
					pcie_perf_stall_stats_desc, i));
}

/* Read the MPCNT PCIe performance group when the device supports it */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

/* Per-TC-priority PPCNT group: offset of high dword of 64-bit @c */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Per-TC congestion-priority PPCNT group: offset of high dword of @c */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev,
sbcam_reg)) 1839 return 0; 1840 1841 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO; 1842 } 1843 1844 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest) 1845 { 1846 struct mlx5_core_dev *mdev = priv->mdev; 1847 int i, prio; 1848 1849 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1850 return; 1851 1852 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 1853 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) 1854 ethtool_sprintf(data, 1855 pport_per_tc_prio_stats_desc[i].format, 1856 prio); 1857 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++) 1858 ethtool_sprintf(data, 1859 pport_per_tc_congest_prio_stats_desc[i].format, 1860 prio); 1861 } 1862 } 1863 1864 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest) 1865 { 1866 struct mlx5e_pport_stats *pport = &priv->stats.pport; 1867 struct mlx5_core_dev *mdev = priv->mdev; 1868 int i, prio; 1869 1870 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1871 return; 1872 1873 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 1874 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) 1875 mlx5e_ethtool_put_stat( 1876 data, 1877 MLX5E_READ_CTR64_BE( 1878 &pport->per_tc_prio_counters[prio], 1879 pport_per_tc_prio_stats_desc, i)); 1880 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++) 1881 mlx5e_ethtool_put_stat( 1882 data, 1883 MLX5E_READ_CTR64_BE( 1884 &pport->per_tc_congest_prio_counters 1885 [prio], 1886 pport_per_tc_congest_prio_stats_desc, 1887 i)); 1888 } 1889 } 1890 1891 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv) 1892 { 1893 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 1894 struct mlx5_core_dev *mdev = priv->mdev; 1895 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; 1896 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 1897 void *out; 1898 int prio; 1899 1900 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1901 return; 1902 1903 MLX5_SET(ppcnt_reg, in, pnat, 2); 1904 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP); 1905 for (prio = 0; prio < NUM_PPORT_PRIO; 
prio++) { 1906 out = pstats->per_tc_prio_counters[prio]; 1907 MLX5_SET(ppcnt_reg, in, prio_tc, prio); 1908 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 1909 } 1910 } 1911 1912 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv) 1913 { 1914 struct mlx5_core_dev *mdev = priv->mdev; 1915 1916 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1917 return 0; 1918 1919 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO; 1920 } 1921 1922 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv) 1923 { 1924 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 1925 struct mlx5_core_dev *mdev = priv->mdev; 1926 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; 1927 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 1928 void *out; 1929 int prio; 1930 1931 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1932 return; 1933 1934 MLX5_SET(ppcnt_reg, in, pnat, 2); 1935 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP); 1936 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 1937 out = pstats->per_tc_congest_prio_counters[prio]; 1938 MLX5_SET(ppcnt_reg, in, prio_tc, prio); 1939 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 1940 } 1941 } 1942 1943 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest) 1944 { 1945 return mlx5e_grp_per_tc_prio_get_num_stats(priv) + 1946 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv); 1947 } 1948 1949 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest) 1950 { 1951 mlx5e_grp_per_tc_prio_update_stats(priv); 1952 mlx5e_grp_per_tc_congest_prio_update_stats(priv); 1953 } 1954 1955 #define PPORT_PER_PRIO_OFF(c) \ 1956 MLX5_BYTE_OFF(ppcnt_reg, \ 1957 counter_set.eth_per_prio_grp_data_layout.c##_high) 1958 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { 1959 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, 1960 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, 1961 { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) 
 },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

/* Emit "rx_prio%d_*"/"tx_prio%d_*" names for each of the NUM_PPORT_PRIO
 * priorities; order must match the fill_stats op below.
 */
static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						    u8 **data)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			ethtool_sprintf(data,
					pport_per_prio_traffic_stats_desc[i].format,
					prio);
	}
}

static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						  u64 **data)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_BE(
					&priv->stats.pport
						 .per_prio_counters[prio],
					pport_per_prio_traffic_stats_desc, i));
	}
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters exist only when both pfcc_mask and stall_detect caps are set
 * (the cap bits act as 0/1 multipliers on the array size).
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

/* Bitmap of priorities with PFC enabled in either direction; 0 for non-ETH
 * ports or on query failure.
 */
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

/* True when global pause is enabled in either direction; false for non-ETH
 * ports or on query failure.
 */
static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	/* One PFC counter set per PFC-enabled priority, plus one "global" set
	 * when global pause is on, plus the capability-gated stall counters.
	 */
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		       NUM_PPORT_PER_PRIO_PFC_COUNTERS +
	       NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
						u8 **data)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			ethtool_sprintf(data,
					pport_per_prio_pfc_stats_desc[i].format,
					pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			ethtool_sprintf(data,
					pport_per_prio_pfc_stats_desc[i].format,
					"global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
}

static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					      u64 **data)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_BE(
					&priv->stats.pport
						 .per_prio_counters[prio],
					pport_per_prio_pfc_stats_desc, i));
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			/* global pause values are read from the prio-0 set */
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_BE(
					&priv->stats.pport.per_prio_counters[0],
					pport_per_prio_pfc_stats_desc, i));
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.per_prio_counters[0],
				      pport_pfc_stall_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
	mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
	mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
}

/* Refresh the per-priority PPCNT cache, one register access per priority */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

/* Port module event counters: the offset indexes a u64 slot per event enum */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		ethtool_puts(data, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		ethtool_puts(data, mlx5e_pme_error_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	/* snapshot into a local copy, then emit from it */
	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i));

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i));
}

/* PME counters are event-driven; nothing to poll here */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

/* TLS group delegates entirely to the kTLS accel layer */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	mlx5e_ktls_get_strings(priv, data);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	mlx5e_ktls_get_stats(priv, data);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

/* Per-RQ software counters exposed via ethtool -S */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
	{
 MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
	/* page_pool counters for this RQ */
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

/* Per-SQ (TX queue) software counters */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

/* XDP-TX queue attached to an RQ (XDP_TX path) */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Standalone XDP-TX queue (XDP_REDIRECT target) */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* AF_XDP zero-copy RQ counters (subset of the rq_stats fields) */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
};

/* AF_XDP zero-copy SQ counters */
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Per-channel (NAPI context) counters */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

/* PTP channel TX queue counters */
static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

/* PTP channel NAPI counters */
static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

/* PTP timestamp CQ counters */
static const struct counter_desc ptp_cq_stats_desc[] = {
	{
 MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
};

/* PTP channel RX queue counters */
static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

/* HTB QoS SQ counters (one set per QoS queue id) */
static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
	}
}

/* QoS SQ counters are software-maintained; nothing to poll */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	/* Group is empty until a PTP channel has been opened */
	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		ethtool_puts(data, ptp_ch_stats_desc[i].format);

	/* all SQ names first, then all CQ names, each per TC — the
	 * fill_stats op below must keep the same ordering
	 */
	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				ethtool_sprintf(data,
						ptp_sq_stats_desc[i].format,
						tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				ethtool_sprintf(data,
						ptp_cq_stats_desc[i].format,
						tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			ethtool_puts(data, ptp_rq_stats_desc[i].format);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
						   ptp_ch_stats_desc, i));

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				mlx5e_ethtool_put_stat(
					data, MLX5E_READ_CTR64_CPU(
						      &priv->ptp_stats.sq[tc],
						      ptp_sq_stats_desc, i));

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				mlx5e_ethtool_put_stat(
					data, MLX5E_READ_CTR64_CPU(
						      &priv->ptp_stats.cq[tc],
						      ptp_cq_stats_desc, i));
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			mlx5e_ethtool_put_stat(
				data,
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i));
	}
}

/* PTP counters are software-maintained; nothing to poll */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	/* XSK counters are reported only once an XSK socket was ever used
	 * (ever_used acts as a 0/1 multiplier).
	 */
	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			ethtool_sprintf(data, ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			ethtool_sprintf(data,
rq_stats_desc[j].format, i); 2642 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) 2643 ethtool_sprintf(data, xskrq_stats_desc[j].format, i); 2644 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) 2645 ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i); 2646 } 2647 2648 for (tc = 0; tc < priv->max_opened_tc; tc++) 2649 for (i = 0; i < max_nch; i++) 2650 for (j = 0; j < NUM_SQ_STATS; j++) 2651 ethtool_sprintf(data, sq_stats_desc[j].format, 2652 i + tc * max_nch); 2653 2654 for (i = 0; i < max_nch; i++) { 2655 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) 2656 ethtool_sprintf(data, xsksq_stats_desc[j].format, i); 2657 for (j = 0; j < NUM_XDPSQ_STATS; j++) 2658 ethtool_sprintf(data, xdpsq_stats_desc[j].format, i); 2659 } 2660 } 2661 2662 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) 2663 { 2664 bool is_xsk = priv->xsk.ever_used; 2665 int max_nch = priv->stats_nch; 2666 int i, j, tc; 2667 2668 for (i = 0; i < max_nch; i++) 2669 for (j = 0; j < NUM_CH_STATS; j++) 2670 mlx5e_ethtool_put_stat( 2671 data, MLX5E_READ_CTR64_CPU( 2672 &priv->channel_stats[i]->ch, 2673 ch_stats_desc, j)); 2674 2675 for (i = 0; i < max_nch; i++) { 2676 for (j = 0; j < NUM_RQ_STATS; j++) 2677 mlx5e_ethtool_put_stat( 2678 data, MLX5E_READ_CTR64_CPU( 2679 &priv->channel_stats[i]->rq, 2680 rq_stats_desc, j)); 2681 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) 2682 mlx5e_ethtool_put_stat( 2683 data, MLX5E_READ_CTR64_CPU( 2684 &priv->channel_stats[i]->xskrq, 2685 xskrq_stats_desc, j)); 2686 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) 2687 mlx5e_ethtool_put_stat( 2688 data, MLX5E_READ_CTR64_CPU( 2689 &priv->channel_stats[i]->rq_xdpsq, 2690 rq_xdpsq_stats_desc, j)); 2691 } 2692 2693 for (tc = 0; tc < priv->max_opened_tc; tc++) 2694 for (i = 0; i < max_nch; i++) 2695 for (j = 0; j < NUM_SQ_STATS; j++) 2696 mlx5e_ethtool_put_stat( 2697 data, 2698 MLX5E_READ_CTR64_CPU( 2699 &priv->channel_stats[i]->sq[tc], 2700 sq_stats_desc, j)); 2701 2702 for (i = 0; i < max_nch; i++) { 2703 for (j = 0; j < 
NUM_XSKSQ_STATS * is_xsk; j++) 2704 mlx5e_ethtool_put_stat( 2705 data, MLX5E_READ_CTR64_CPU( 2706 &priv->channel_stats[i]->xsksq, 2707 xsksq_stats_desc, j)); 2708 for (j = 0; j < NUM_XDPSQ_STATS; j++) 2709 mlx5e_ethtool_put_stat( 2710 data, MLX5E_READ_CTR64_CPU( 2711 &priv->channel_stats[i]->xdpsq, 2712 xdpsq_stats_desc, j)); 2713 } 2714 } 2715 2716 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; } 2717 2718 MLX5E_DEFINE_STATS_GRP(sw, 0); 2719 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS); 2720 MLX5E_DEFINE_STATS_GRP(vnic_env, 0); 2721 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS); 2722 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS); 2723 MLX5E_DEFINE_STATS_GRP(2863, 0); 2724 MLX5E_DEFINE_STATS_GRP(2819, 0); 2725 MLX5E_DEFINE_STATS_GRP(phy, 0); 2726 MLX5E_DEFINE_STATS_GRP(pcie, 0); 2727 MLX5E_DEFINE_STATS_GRP(per_prio, 0); 2728 MLX5E_DEFINE_STATS_GRP(pme, 0); 2729 MLX5E_DEFINE_STATS_GRP(channels, 0); 2730 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); 2731 MLX5E_DEFINE_STATS_GRP(eth_ext, 0); 2732 static MLX5E_DEFINE_STATS_GRP(tls, 0); 2733 MLX5E_DEFINE_STATS_GRP(ptp, 0); 2734 static MLX5E_DEFINE_STATS_GRP(qos, 0); 2735 2736 /* The stats groups order is opposite to the update_stats() order calls */ 2737 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { 2738 &MLX5E_STATS_GRP(sw), 2739 &MLX5E_STATS_GRP(qcnt), 2740 &MLX5E_STATS_GRP(vnic_env), 2741 &MLX5E_STATS_GRP(vport), 2742 &MLX5E_STATS_GRP(802_3), 2743 &MLX5E_STATS_GRP(2863), 2744 &MLX5E_STATS_GRP(2819), 2745 &MLX5E_STATS_GRP(phy), 2746 &MLX5E_STATS_GRP(eth_ext), 2747 &MLX5E_STATS_GRP(pcie), 2748 &MLX5E_STATS_GRP(per_prio), 2749 &MLX5E_STATS_GRP(pme), 2750 #ifdef CONFIG_MLX5_EN_IPSEC 2751 &MLX5E_STATS_GRP(ipsec_hw), 2752 &MLX5E_STATS_GRP(ipsec_sw), 2753 #endif 2754 &MLX5E_STATS_GRP(tls), 2755 &MLX5E_STATS_GRP(channels), 2756 &MLX5E_STATS_GRP(per_port_buff_congest), 2757 &MLX5E_STATS_GRP(ptp), 2758 &MLX5E_STATS_GRP(qos), 2759 #ifdef CONFIG_MLX5_MACSEC 2760 
&MLX5E_STATS_GRP(macsec_hw), 2761 #endif 2762 &MLX5E_STATS_GRP(pcie_cong), 2763 }; 2764 2765 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) 2766 { 2767 return ARRAY_SIZE(mlx5e_nic_stats_grps); 2768 } 2769