1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "health.h"
5 #include "en/ptp.h"
6 #include "en/devlink.h"
7 #include "lib/tout.h"
8
9 /* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h */
/* Human-readable names for the MLX5E_SQ_STATE_* bits, indexed by bit
 * position; consumed by mlx5e_health_sq_put_sw_state() when formatting
 * the devlink fmsg "SW State" object.
 */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
21
mlx5e_wait_for_sq_flush(struct mlx5e_txqsq * sq)22 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
23 {
24 struct mlx5_core_dev *dev = sq->mdev;
25 unsigned long exp_time;
26
27 exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
28
29 while (time_before(jiffies, exp_time)) {
30 if (sq->cc == sq->pc)
31 return 0;
32
33 msleep(20);
34 }
35
36 netdev_err(sq->netdev,
37 "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
38 sq->sqn, sq->cc, sq->pc);
39
40 return -ETIMEDOUT;
41 }
42
mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq * sq)43 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
44 {
45 WARN_ONCE(sq->cc != sq->pc,
46 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
47 sq->sqn, sq->cc, sq->pc);
48 sq->cc = 0;
49 sq->dma_fifo_cc = 0;
50 sq->pc = 0;
51 }
52
mlx5e_health_sq_put_sw_state(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq)53 static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
54 {
55 int i;
56
57 BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
58 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
59 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
60
61 for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i)
62 devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
63 test_bit(i, &sq->state));
64
65 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
66 }
67
/* Recover an SQ that hit an error CQE: stop the TXQ, drain in-flight
 * WQEs, move the SQ back to ready state in HW, reset SW counters and
 * reactivate. Returns 0 on success or if no recovery was needed; a
 * negative errno on failure. @ctx is the struct mlx5e_txqsq to recover.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	/* Nothing to do unless this SQ was marked for recovery. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	/* Only an SQ in HW error state needs the reset flow below. */
	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	/* Stop the stack from queuing new packets on this TXQ. */
	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. It is now safe to reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	mlx5e_activate_txqsq(sq);
	/* Re-trigger NAPI on the freshly activated SQ. */
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}
122
/* Context handed to the timeout recover/dump callbacks through
 * struct mlx5e_err_ctx.
 */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq;	/* the SQ that hit the TX timeout */
	signed int status;	/* 0: this SQ recovered, 1: all channels
				 * reopened, negative errno: recovery failed
				 */
};
127
mlx5e_tx_reporter_timeout_recover(void * ctx)128 static int mlx5e_tx_reporter_timeout_recover(void *ctx)
129 {
130 struct mlx5e_tx_timeout_ctx *to_ctx;
131 struct mlx5e_priv *priv;
132 struct mlx5_eq_comp *eq;
133 struct mlx5e_txqsq *sq;
134 int err;
135
136 to_ctx = ctx;
137 sq = to_ctx->sq;
138 eq = sq->cq.mcq.eq;
139 priv = sq->priv;
140 err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
141 if (!err) {
142 to_ctx->status = 0; /* this sq recovered */
143 return err;
144 }
145
146 err = mlx5e_safe_reopen_channels(priv);
147 if (!err) {
148 to_ctx->status = 1; /* all channels recovered */
149 return err;
150 }
151
152 to_ctx->status = err;
153 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
154 netdev_err(priv->netdev,
155 "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
156 err);
157
158 return err;
159 }
160
/* Recover an unhealthy PTP SQ by recreating the whole PTP channel under
 * the state lock. @ctx is the struct mlx5e_ptpsq to recover. Returns 0
 * on success, a negative errno if reopening the PTP channel fails.
 */
static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;
	struct mlx5e_channels *chs;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int carrier_ok;
	int err;

	/* Nothing to do unless this PTP SQ was marked for recovery. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
		return 0;

	priv = ptpsq->txqsq.priv;

	mutex_lock(&priv->state_lock);
	chs = &priv->channels;
	netdev = priv->netdev;

	/* Take the link down while channels are torn down and recreated;
	 * remember the previous carrier state so it can be restored.
	 */
	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	mlx5e_deactivate_priv_channels(priv);

	/* Recreate the PTP channel from scratch. */
	mlx5e_ptp_close(chs->ptp);
	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);

	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
197
198 /* state lock cannot be grabbed within this function.
199 * It can cause a dead lock or a read-after-free.
200 */
mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx * err_ctx)201 static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
202 {
203 return err_ctx->recover(err_ctx->ctx);
204 }
205
/* Devlink health .recover callback. With a specific error context, run
 * its recover routine; otherwise treat this as a global recovery request
 * and recover all channels.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_recover_from_ctx(err_ctx);

	return mlx5e_health_recover_channels(priv);
}
216
217 static void
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq,int tc)218 mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
219 struct mlx5e_txqsq *sq, int tc)
220 {
221 bool stopped = netif_xmit_stopped(sq->txq);
222 struct mlx5e_priv *priv = sq->priv;
223 u8 state;
224 int err;
225
226 devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
227 devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
228 devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
229
230 err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
231 if (!err)
232 devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
233
234 devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
235 devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
236 devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
237 mlx5e_health_sq_put_sw_state(fmsg, sq);
238 mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
239 mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
240 }
241
/* Emit one diagnose object for a regular (per-channel) txqsq. */
static void
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
					struct mlx5e_txqsq *sq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
	devlink_fmsg_obj_nest_end(fmsg);
}
251
/* Emit one diagnose object for a PTP SQ, including its port TS CQ. */
static void
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	devlink_fmsg_obj_nest_end(fmsg);
}
264
265 static void
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * txqsq)266 mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
267 struct mlx5e_txqsq *txqsq)
268 {
269 bool real_time = mlx5_is_real_time_sq(txqsq->mdev);
270 u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
271 u32 sq_stride = MLX5_SEND_WQE_BB;
272
273 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
274 devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
275 devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
276 devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
277 mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
278 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
279 }
280
/* Common Config helper: describe the port TS CQ of a PTP SQ. */
static void
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
289
290 static void
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg)291 mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
292 struct devlink_fmsg *fmsg)
293 {
294 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
295 struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
296 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
297 struct mlx5e_ptpsq *generic_ptpsq;
298
299 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
300 mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
301
302 if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
303 goto out;
304
305 generic_ptpsq = &ptp_ch->ptpsq[0];
306 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
307 mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
308 mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
309 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
310 out:
311 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
312 }
313
mlx5e_tx_reporter_diagnose(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,struct netlink_ext_ack * extack)314 static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
315 struct devlink_fmsg *fmsg,
316 struct netlink_ext_ack *extack)
317 {
318 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
319 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
320
321 int i, tc;
322
323 mutex_lock(&priv->state_lock);
324
325 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
326 goto unlock;
327
328 mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
329 devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
330
331 for (i = 0; i < priv->channels.num; i++) {
332 struct mlx5e_channel *c = priv->channels.c[i];
333
334 for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
335 struct mlx5e_txqsq *sq = &c->sq[tc];
336
337 mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
338 }
339 }
340
341 if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
342 goto close_sqs_nest;
343
344 for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++)
345 mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
346 &ptp_ch->ptpsq[tc],
347 tc);
348
349 close_sqs_nest:
350 devlink_fmsg_arr_pair_nest_end(fmsg);
351 unlock:
352 mutex_unlock(&priv->state_lock);
353 return 0;
354 }
355
/* Dump HW resources for a single SQ (@ctx): the full SX slice, the SQ's
 * QPC, and its send buffer. No-op while the netdev is closed.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	/* key.size is set once here and reused by the dumps below. */
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	/* Per-SQ section: the full QPC of this SQ... */
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	/* ...followed by its send buffer; key.index1/num_of_obj1 are
	 * carried over from the QPC dump above.
	 */
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	return 0;
}
389
mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg,void * ctx)390 static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
391 void *ctx)
392 {
393 struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
394
395 return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
396 }
397
mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg,void * ctx)398 static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
399 struct devlink_fmsg *fmsg,
400 void *ctx)
401 {
402 struct mlx5e_ptpsq *ptpsq = ctx;
403
404 return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
405 }
406
mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg)407 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
408 struct devlink_fmsg *fmsg)
409 {
410 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
411 struct mlx5_rsc_key key = {};
412 int i, tc;
413
414 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
415 return 0;
416
417 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
418 key.size = PAGE_SIZE;
419 key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
420 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
421 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
422 devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
423
424 for (i = 0; i < priv->channels.num; i++) {
425 struct mlx5e_channel *c = priv->channels.c[i];
426
427 for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
428 struct mlx5e_txqsq *sq = &c->sq[tc];
429
430 mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
431 }
432 }
433
434 if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
435 for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
436 struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
437
438 mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
439 }
440 }
441
442 devlink_fmsg_arr_pair_nest_end(fmsg);
443 return 0;
444 }
445
mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv * priv,struct mlx5e_err_ctx * err_ctx,struct devlink_fmsg * fmsg)446 static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
447 struct mlx5e_err_ctx *err_ctx,
448 struct devlink_fmsg *fmsg)
449 {
450 return err_ctx->dump(priv, fmsg, err_ctx->ctx);
451 }
452
/* Devlink health .dump callback. With an error context, dump only the
 * implicated SQ; otherwise dump every SQ.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg);

	return mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}
463
mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq * sq)464 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
465 {
466 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
467 struct mlx5e_priv *priv = sq->priv;
468 struct mlx5e_err_ctx err_ctx = {};
469
470 err_ctx.ctx = sq;
471 err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
472 err_ctx.dump = mlx5e_tx_reporter_dump_sq;
473 snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
474
475 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
476 }
477
mlx5e_reporter_tx_timeout(struct mlx5e_txqsq * sq)478 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
479 {
480 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
481 struct mlx5e_tx_timeout_ctx to_ctx = {};
482 struct mlx5e_priv *priv = sq->priv;
483 struct mlx5e_err_ctx err_ctx = {};
484
485 to_ctx.sq = sq;
486 err_ctx.ctx = &to_ctx;
487 err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
488 err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
489 snprintf(err_str, sizeof(err_str),
490 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
491 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
492 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
493
494 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
495 return to_ctx.status;
496 }
497
mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq * ptpsq)498 void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
499 {
500 struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
501 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
502 struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
503 struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
504 struct mlx5e_priv *priv = txqsq->priv;
505 struct mlx5e_err_ctx err_ctx = {};
506
507 err_ctx.ctx = ptpsq;
508 err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
509 err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
510 snprintf(err_str, sizeof(err_str),
511 "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
512 txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
513
514 mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
515 }
516
/* Devlink health reporter ops for the "tx" reporter; registered in
 * mlx5e_reporter_tx_create().
 */
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
		.name = "tx",
		.recover = mlx5e_tx_reporter_recover,
		.diagnose = mlx5e_tx_reporter_diagnose,
		.dump = mlx5e_tx_reporter_dump,
};
523
524 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
525
mlx5e_reporter_tx_create(struct mlx5e_priv * priv)526 void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
527 {
528 struct devlink_health_reporter *reporter;
529
530 reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
531 &mlx5_tx_reporter_ops,
532 MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
533 if (IS_ERR(reporter)) {
534 netdev_warn(priv->netdev,
535 "Failed to create tx reporter, err = %ld\n",
536 PTR_ERR(reporter));
537 return;
538 }
539 priv->tx_reporter = reporter;
540 }
541
mlx5e_reporter_tx_destroy(struct mlx5e_priv * priv)542 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
543 {
544 if (!priv->tx_reporter)
545 return;
546
547 devlink_health_reporter_destroy(priv->tx_reporter);
548 priv->tx_reporter = NULL;
549 }
550