1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include<linux/bitfield.h>
9
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14
15 #define DRV_NAME "octeontx2-af"
16
/* Open a named pair nest plus an object nest in the devlink fmsg stream.
 * Counterpart of rvu_report_pair_end().
 */
static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	int rc = devlink_fmsg_pair_nest_start(fmsg, name);

	return rc ? rc : devlink_fmsg_obj_nest_start(fmsg);
}
27
/* Close the object nest then the pair nest opened by rvu_report_pair_start() */
static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	int rc = devlink_fmsg_obj_nest_end(fmsg);

	return rc ? rc : devlink_fmsg_pair_nest_end(fmsg);
}
38
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)39 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
40 const char *name, irq_handler_t fn)
41 {
42 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
43 int rc;
44
45 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
46 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
47 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
48 if (rc)
49 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
50 else
51 rvu->irq_allocated[offset] = true;
52
53 return rvu->irq_allocated[offset];
54 }
55
rvu_nix_intr_work(struct work_struct * work)56 static void rvu_nix_intr_work(struct work_struct *work)
57 {
58 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
59
60 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
61 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
62 "NIX_AF_RVU Error",
63 rvu_nix_health_reporter->nix_event_ctx);
64 }
65
/* NIX_AF_RVU_INT hard-irq handler: snapshot the cause bits for the health
 * reporter, ack them, mask the source, and defer reporting to a workqueue
 * (devlink_health_report() may sleep, so it cannot run in irq context).
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
90
rvu_nix_gen_work(struct work_struct * work)91 static void rvu_nix_gen_work(struct work_struct *work)
92 {
93 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
94
95 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
96 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
97 "NIX_AF_GEN Error",
98 rvu_nix_health_reporter->nix_event_ctx);
99 }
100
/* NIX_AF_GEN_INT hard-irq handler: snapshot, ack, mask, then defer the
 * devlink health report to process context via the devlink workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
125
rvu_nix_err_work(struct work_struct * work)126 static void rvu_nix_err_work(struct work_struct *work)
127 {
128 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
129
130 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
131 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
132 "NIX_AF_ERR Error",
133 rvu_nix_health_reporter->nix_event_ctx);
134 }
135
/* NIX_AF_ERR_INT hard-irq handler: snapshot, ack, mask, then defer the
 * devlink health report to process context via the devlink workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
160
rvu_nix_ras_work(struct work_struct * work)161 static void rvu_nix_ras_work(struct work_struct *work)
162 {
163 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
164
165 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
166 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
167 "NIX_AF_RAS Error",
168 rvu_nix_health_reporter->nix_event_ctx);
169 }
170
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)171 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
172 {
173 struct rvu_nix_event_ctx *nix_event_context;
174 struct rvu_devlink *rvu_dl = rvu_irq;
175 struct rvu *rvu;
176 int blkaddr;
177 u64 intr;
178
179 rvu = rvu_dl->rvu;
180 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
181 if (blkaddr < 0)
182 return IRQ_NONE;
183
184 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
185 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
186 nix_event_context->nix_af_rvu_ras = intr;
187
188 /* Clear interrupts */
189 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
190 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
191 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
192
193 return IRQ_HANDLED;
194 }
195
/* Mask all NIX AF health interrupts and free the vectors requested by
 * rvu_nix_register_interrupts(). Masking first prevents a handler from
 * firing while its IRQ is being torn down.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* Vector offset 0 means firmware never programmed the MSI-X base;
	 * nothing was registered in that case.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* Remaining vectors (AF_ERR, POISON, GEN) are contiguous from
	 * NIX_AF_INT_VEC_AF_ERR up to NIX_AF_INT_VEC_CNT.
	 */
	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
226
rvu_nix_register_interrupts(struct rvu * rvu)227 static int rvu_nix_register_interrupts(struct rvu *rvu)
228 {
229 int blkaddr, base;
230 bool rc;
231
232 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
233 if (blkaddr < 0)
234 return blkaddr;
235
236 /* Get NIX AF MSIX vectors offset. */
237 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
238 if (!base) {
239 dev_warn(rvu->dev,
240 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
241 blkaddr - BLKADDR_NIX0);
242 return 0;
243 }
244 /* Register and enable NIX_AF_RVU_INT interrupt */
245 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
246 "NIX_AF_RVU_INT",
247 rvu_nix_af_rvu_intr_handler);
248 if (!rc)
249 goto err;
250 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
251
252 /* Register and enable NIX_AF_GEN_INT interrupt */
253 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
254 "NIX_AF_GEN_INT",
255 rvu_nix_af_rvu_gen_handler);
256 if (!rc)
257 goto err;
258 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
259
260 /* Register and enable NIX_AF_ERR_INT interrupt */
261 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
262 "NIX_AF_ERR_INT",
263 rvu_nix_af_rvu_err_handler);
264 if (!rc)
265 goto err;
266 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
267
268 /* Register and enable NIX_AF_RAS interrupt */
269 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
270 "NIX_AF_RAS",
271 rvu_nix_af_rvu_ras_handler);
272 if (!rc)
273 goto err;
274 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
275
276 return 0;
277 err:
278 rvu_nix_unregister_interrupts(rvu);
279 return rc;
280 }
281
rvu_nix_report_show(struct devlink_fmsg * fmsg,void * ctx,enum nix_af_rvu_health health_reporter)282 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
283 enum nix_af_rvu_health health_reporter)
284 {
285 struct rvu_nix_event_ctx *nix_event_context;
286 u64 intr_val;
287 int err;
288
289 nix_event_context = ctx;
290 switch (health_reporter) {
291 case NIX_AF_RVU_INTR:
292 intr_val = nix_event_context->nix_af_rvu_int;
293 err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
294 if (err)
295 return err;
296 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
297 nix_event_context->nix_af_rvu_int);
298 if (err)
299 return err;
300 if (intr_val & BIT_ULL(0)) {
301 err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
302 if (err)
303 return err;
304 }
305 err = rvu_report_pair_end(fmsg);
306 if (err)
307 return err;
308 break;
309 case NIX_AF_RVU_GEN:
310 intr_val = nix_event_context->nix_af_rvu_gen;
311 err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
312 if (err)
313 return err;
314 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
315 nix_event_context->nix_af_rvu_gen);
316 if (err)
317 return err;
318 if (intr_val & BIT_ULL(0)) {
319 err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
320 if (err)
321 return err;
322 }
323 if (intr_val & BIT_ULL(1)) {
324 err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
325 if (err)
326 return err;
327 }
328 if (intr_val & BIT_ULL(4)) {
329 err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
330 if (err)
331 return err;
332 }
333 err = rvu_report_pair_end(fmsg);
334 if (err)
335 return err;
336 break;
337 case NIX_AF_RVU_ERR:
338 intr_val = nix_event_context->nix_af_rvu_err;
339 err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
340 if (err)
341 return err;
342 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
343 nix_event_context->nix_af_rvu_err);
344 if (err)
345 return err;
346 if (intr_val & BIT_ULL(14)) {
347 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
348 if (err)
349 return err;
350 }
351 if (intr_val & BIT_ULL(13)) {
352 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
353 if (err)
354 return err;
355 }
356 if (intr_val & BIT_ULL(12)) {
357 err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
358 if (err)
359 return err;
360 }
361 if (intr_val & BIT_ULL(6)) {
362 err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
363 if (err)
364 return err;
365 }
366 if (intr_val & BIT_ULL(5)) {
367 err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
368 if (err)
369 return err;
370 }
371 if (intr_val & BIT_ULL(4)) {
372 err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
373 if (err)
374 return err;
375 }
376 if (intr_val & BIT_ULL(3)) {
377 err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
378 if (err)
379 return err;
380 }
381 if (intr_val & BIT_ULL(2)) {
382 err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
383 if (err)
384 return err;
385 }
386 if (intr_val & BIT_ULL(1)) {
387 err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
388 if (err)
389 return err;
390 }
391 if (intr_val & BIT_ULL(0)) {
392 err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
393 if (err)
394 return err;
395 }
396 err = rvu_report_pair_end(fmsg);
397 if (err)
398 return err;
399 break;
400 case NIX_AF_RVU_RAS:
401 intr_val = nix_event_context->nix_af_rvu_err;
402 err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
403 if (err)
404 return err;
405 err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
406 nix_event_context->nix_af_rvu_err);
407 if (err)
408 return err;
409 err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
410 if (err)
411 return err;
412 if (intr_val & BIT_ULL(34)) {
413 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
414 if (err)
415 return err;
416 }
417 if (intr_val & BIT_ULL(33)) {
418 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
419 if (err)
420 return err;
421 }
422 if (intr_val & BIT_ULL(32)) {
423 err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
424 if (err)
425 return err;
426 }
427 if (intr_val & BIT_ULL(4)) {
428 err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
429 if (err)
430 return err;
431 }
432 if (intr_val & BIT_ULL(3)) {
433 err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
434
435 if (err)
436 return err;
437 }
438 if (intr_val & BIT_ULL(2)) {
439 err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
440 if (err)
441 return err;
442 }
443 if (intr_val & BIT_ULL(1)) {
444 err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
445 if (err)
446 return err;
447 }
448 if (intr_val & BIT_ULL(0)) {
449 err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
450 if (err)
451 return err;
452 }
453 err = rvu_report_pair_end(fmsg);
454 if (err)
455 return err;
456 break;
457 default:
458 return -EINVAL;
459 }
460
461 return 0;
462 }
463
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)464 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
465 struct devlink_fmsg *fmsg, void *ctx,
466 struct netlink_ext_ack *netlink_extack)
467 {
468 struct rvu *rvu = devlink_health_reporter_priv(reporter);
469 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
470 struct rvu_nix_event_ctx *nix_ctx;
471
472 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
473
474 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
475 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
476 }
477
rvu_hw_nix_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)478 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
479 void *ctx, struct netlink_ext_ack *netlink_extack)
480 {
481 struct rvu *rvu = devlink_health_reporter_priv(reporter);
482 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
483 int blkaddr;
484
485 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
486 if (blkaddr < 0)
487 return blkaddr;
488
489 if (nix_event_ctx->nix_af_rvu_int)
490 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
491
492 return 0;
493 }
494
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)495 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
496 struct devlink_fmsg *fmsg, void *ctx,
497 struct netlink_ext_ack *netlink_extack)
498 {
499 struct rvu *rvu = devlink_health_reporter_priv(reporter);
500 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
501 struct rvu_nix_event_ctx *nix_ctx;
502
503 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
504
505 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
506 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
507 }
508
rvu_hw_nix_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)509 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
510 void *ctx, struct netlink_ext_ack *netlink_extack)
511 {
512 struct rvu *rvu = devlink_health_reporter_priv(reporter);
513 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
514 int blkaddr;
515
516 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
517 if (blkaddr < 0)
518 return blkaddr;
519
520 if (nix_event_ctx->nix_af_rvu_gen)
521 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
522
523 return 0;
524 }
525
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)526 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
527 struct devlink_fmsg *fmsg, void *ctx,
528 struct netlink_ext_ack *netlink_extack)
529 {
530 struct rvu *rvu = devlink_health_reporter_priv(reporter);
531 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
532 struct rvu_nix_event_ctx *nix_ctx;
533
534 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
535
536 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
537 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
538 }
539
rvu_hw_nix_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)540 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
541 void *ctx, struct netlink_ext_ack *netlink_extack)
542 {
543 struct rvu *rvu = devlink_health_reporter_priv(reporter);
544 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
545 int blkaddr;
546
547 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
548 if (blkaddr < 0)
549 return blkaddr;
550
551 if (nix_event_ctx->nix_af_rvu_err)
552 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
553
554 return 0;
555 }
556
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)557 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
558 struct devlink_fmsg *fmsg, void *ctx,
559 struct netlink_ext_ack *netlink_extack)
560 {
561 struct rvu *rvu = devlink_health_reporter_priv(reporter);
562 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
563 struct rvu_nix_event_ctx *nix_ctx;
564
565 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
566
567 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
568 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
569 }
570
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)571 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
572 void *ctx, struct netlink_ext_ack *netlink_extack)
573 {
574 struct rvu *rvu = devlink_health_reporter_priv(reporter);
575 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
576 int blkaddr;
577
578 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
579 if (blkaddr < 0)
580 return blkaddr;
581
582 if (nix_event_ctx->nix_af_rvu_int)
583 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
584
585 return 0;
586 }
587
/* Instantiate the devlink health reporter ops for each NIX interrupt class.
 * RVU_REPORTERS(name) pairs the rvu_<name>_dump and rvu_<name>_recover
 * callbacks defined above (macro declared in rvu.h — presumably expands to a
 * struct devlink_health_reporter_ops definition; confirm against the header).
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

/* Forward declaration: creation error paths below tear down via destroy */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
594
/* Allocate the NIX health reporter state, create the four devlink reporters
 * and the shared workqueue, and wire up the deferred-report work items.
 *
 * Error-path notes (NOTE(review)): on a failed kzalloc/reporter-create this
 * returns without destroying what was already created — presumably the caller
 * is expected to invoke rvu_nix_health_reporters_destroy() on failure, but
 * rvu_nix_health_reporters_create() does not; verify for leaks of
 * rvu_reporters/nix_event_context and earlier reporters.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish early so destroy() can find partially-built state */
	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	/* Each reporter gets 'rvu' as its priv; graceful_period is 0 */
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue used by the IRQ handlers to defer devlink_health_report() */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		goto err;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
err:
	rvu_nix_health_reporters_destroy(rvu_dl);
	return -ENOMEM;
}
657
rvu_nix_health_reporters_create(struct rvu_devlink * rvu_dl)658 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
659 {
660 struct rvu *rvu = rvu_dl->rvu;
661 int err;
662
663 err = rvu_nix_register_reporters(rvu_dl);
664 if (err) {
665 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
666 err);
667 return err;
668 }
669 rvu_nix_register_interrupts(rvu);
670
671 return 0;
672 }
673
/* Tear down the NIX health reporters, their interrupts, and owned memory.
 *
 * NOTE(review): the early return uses the ras reporter (the last one created)
 * as a "creation completed" sentinel; if creation failed partway, this skips
 * freeing nix_event_ctx and the reporters struct — verify against the intended
 * error-handling contract of rvu_nix_register_reporters(). Also assumes
 * rvu_dl->rvu_nix_health_reporter is non-NULL; confirm callers guarantee that.
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	/* IRQs reference rvu_dl; free them before releasing the contexts */
	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
699
rvu_npa_intr_work(struct work_struct * work)700 static void rvu_npa_intr_work(struct work_struct *work)
701 {
702 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
703
704 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
705 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
706 "NPA_AF_RVU Error",
707 rvu_npa_health_reporter->npa_event_ctx);
708 }
709
/* NPA_AF_RVU_INT hard-irq handler: snapshot the cause bits, ack them, mask
 * the source, and defer reporting to a workqueue (devlink_health_report()
 * may sleep).
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
734
rvu_npa_gen_work(struct work_struct * work)735 static void rvu_npa_gen_work(struct work_struct *work)
736 {
737 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
738
739 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
740 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
741 "NPA_AF_GEN Error",
742 rvu_npa_health_reporter->npa_event_ctx);
743 }
744
/* NPA_AF_GEN_INT hard-irq handler: snapshot, ack, mask, then defer the
 * devlink health report to process context via the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
769
rvu_npa_err_work(struct work_struct * work)770 static void rvu_npa_err_work(struct work_struct *work)
771 {
772 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
773
774 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
775 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
776 "NPA_AF_ERR Error",
777 rvu_npa_health_reporter->npa_event_ctx);
778 }
779
/* NPA_AF_ERR_INT hard-irq handler: snapshot, ack, mask, then defer the
 * devlink health report to process context via the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	/* Latch the cause for the dump/recover callbacks */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
803
rvu_npa_ras_work(struct work_struct * work)804 static void rvu_npa_ras_work(struct work_struct *work)
805 {
806 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
807
808 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
809 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
810 "HW NPA_AF_RAS Error reported",
811 rvu_npa_health_reporter->npa_event_ctx);
812 }
813
/* NPA_AF_RAS (poison) hard-irq handler: snapshot, ack, mask, then defer the
 * devlink health report to process context via the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause for the dump/recover callbacks */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	/* Mask the source until the recover op re-enables it via ENA_W1S */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
838
rvu_npa_unregister_interrupts(struct rvu * rvu)839 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
840 {
841 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
842 int i, offs, blkaddr;
843 u64 reg;
844
845 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
846 if (blkaddr < 0)
847 return;
848
849 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
850 offs = reg & 0x3FF;
851
852 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
853 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
854 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
855 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
856
857 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
858 if (rvu->irq_allocated[offs + i]) {
859 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
860 rvu->irq_allocated[offs + i] = false;
861 }
862 }
863
rvu_npa_register_interrupts(struct rvu * rvu)864 static int rvu_npa_register_interrupts(struct rvu *rvu)
865 {
866 int blkaddr, base;
867 bool rc;
868
869 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
870 if (blkaddr < 0)
871 return blkaddr;
872
873 /* Get NPA AF MSIX vectors offset. */
874 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
875 if (!base) {
876 dev_warn(rvu->dev,
877 "Failed to get NPA_AF_INT vector offsets\n");
878 return 0;
879 }
880
881 /* Register and enable NPA_AF_RVU_INT interrupt */
882 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
883 "NPA_AF_RVU_INT",
884 rvu_npa_af_rvu_intr_handler);
885 if (!rc)
886 goto err;
887 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
888
889 /* Register and enable NPA_AF_GEN_INT interrupt */
890 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
891 "NPA_AF_RVU_GEN",
892 rvu_npa_af_gen_intr_handler);
893 if (!rc)
894 goto err;
895 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
896
897 /* Register and enable NPA_AF_ERR_INT interrupt */
898 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
899 "NPA_AF_ERR_INT",
900 rvu_npa_af_err_intr_handler);
901 if (!rc)
902 goto err;
903 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
904
905 /* Register and enable NPA_AF_RAS interrupt */
906 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
907 "NPA_AF_RAS",
908 rvu_npa_af_ras_intr_handler);
909 if (!rc)
910 goto err;
911 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
912
913 return 0;
914 err:
915 rvu_npa_unregister_interrupts(rvu);
916 return rc;
917 }
918
/* rvu_npa_report_show() - decode a latched NPA interrupt snapshot into a
 * devlink formatted message.
 * @fmsg: devlink formatted-message stream to append to.
 * @ctx: struct rvu_npa_event_ctx holding the interrupt register values
 *       captured when the event fired.
 * @health_reporter: selects which saved register (GEN/ERR/RAS/RVU) to decode.
 *
 * Each case opens a named report pair, emits the raw 64-bit register value,
 * then one text line per understood status bit.
 *
 * Return: 0 on success, negative devlink_fmsg error, or -EINVAL for an
 * unknown reporter type.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;
	int err;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
						npa_event_context->npa_af_rvu_gen);
		if (err)
			return err;
		if (intr_val & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
			if (err)
				return err;
		}

		/* Bits [15:0]: per input-queue "free disabled" status */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
			if (err)
				return err;
		}
		if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
			if (err)
				return err;
		}

		/* Bits [31:16]: per input-queue "alloc disabled" status */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_SSO)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_TIM)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_DPI)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
			if (err)
				return err;
		}
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_ERR:
		/* NPA error interrupt: AQ instruction/result faults and
		 * doorbell errors.
		 */
		err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
						npa_event_context->npa_af_rvu_err);
		if (err)
			return err;

		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_RAS:
		/* RAS interrupt: poisoned data detected on AQ structures or
		 * hardware context.
		 */
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
						npa_event_context->npa_af_rvu_ras);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
			if (err)
				return err;
		}
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
			if (err)
				return err;
		}
		err = rvu_report_pair_end(fmsg);
		if (err)
			return err;
		break;
	case NPA_AF_RVU_INTR:
		/* RVU interrupt: slot unmap errors */
		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		if (err)
			return err;
		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
						npa_event_context->npa_af_rvu_int);
		if (err)
			return err;
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
			if (err)
				return err;
		}
		return rvu_report_pair_end(fmsg);
	default:
		return -EINVAL;
	}

	return 0;
}
1106
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1107 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
1108 struct devlink_fmsg *fmsg, void *ctx,
1109 struct netlink_ext_ack *netlink_extack)
1110 {
1111 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1112 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1113 struct rvu_npa_event_ctx *npa_ctx;
1114
1115 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1116
1117 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
1118 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
1119 }
1120
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1121 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
1122 void *ctx, struct netlink_ext_ack *netlink_extack)
1123 {
1124 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1125 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1126 int blkaddr;
1127
1128 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1129 if (blkaddr < 0)
1130 return blkaddr;
1131
1132 if (npa_event_ctx->npa_af_rvu_int)
1133 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
1134
1135 return 0;
1136 }
1137
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1138 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
1139 struct devlink_fmsg *fmsg, void *ctx,
1140 struct netlink_ext_ack *netlink_extack)
1141 {
1142 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1143 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1144 struct rvu_npa_event_ctx *npa_ctx;
1145
1146 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1147
1148 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
1149 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
1150 }
1151
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1152 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
1153 void *ctx, struct netlink_ext_ack *netlink_extack)
1154 {
1155 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1156 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1157 int blkaddr;
1158
1159 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1160 if (blkaddr < 0)
1161 return blkaddr;
1162
1163 if (npa_event_ctx->npa_af_rvu_gen)
1164 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
1165
1166 return 0;
1167 }
1168
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1169 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
1170 struct devlink_fmsg *fmsg, void *ctx,
1171 struct netlink_ext_ack *netlink_extack)
1172 {
1173 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1174 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1175 struct rvu_npa_event_ctx *npa_ctx;
1176
1177 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1178
1179 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
1180 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
1181 }
1182
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1183 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
1184 void *ctx, struct netlink_ext_ack *netlink_extack)
1185 {
1186 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1187 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1188 int blkaddr;
1189
1190 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1191 if (blkaddr < 0)
1192 return blkaddr;
1193
1194 if (npa_event_ctx->npa_af_rvu_err)
1195 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1196
1197 return 0;
1198 }
1199
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1200 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1201 struct devlink_fmsg *fmsg, void *ctx,
1202 struct netlink_ext_ack *netlink_extack)
1203 {
1204 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1205 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1206 struct rvu_npa_event_ctx *npa_ctx;
1207
1208 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1209
1210 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1211 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1212 }
1213
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1214 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1215 void *ctx, struct netlink_ext_ack *netlink_extack)
1216 {
1217 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1218 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1219 int blkaddr;
1220
1221 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1222 if (blkaddr < 0)
1223 return blkaddr;
1224
1225 if (npa_event_ctx->npa_af_rvu_ras)
1226 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1227
1228 return 0;
1229 }
1230
/* Instantiate the devlink health reporter ops for each NPA reporter via the
 * RVU_REPORTERS() helper macro (defined in a header — presumably wires up
 * the matching *_dump/*_recover callbacks above; see its definition).
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration: used by the error path in rvu_npa_register_reporters() */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1237
rvu_npa_register_reporters(struct rvu_devlink * rvu_dl)1238 static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
1239 {
1240 struct rvu_npa_health_reporters *rvu_reporters;
1241 struct rvu_npa_event_ctx *npa_event_context;
1242 struct rvu *rvu = rvu_dl->rvu;
1243
1244 rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
1245 if (!rvu_reporters)
1246 return -ENOMEM;
1247
1248 rvu_dl->rvu_npa_health_reporter = rvu_reporters;
1249 npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
1250 if (!npa_event_context)
1251 return -ENOMEM;
1252
1253 rvu_reporters->npa_event_ctx = npa_event_context;
1254 rvu_reporters->rvu_hw_npa_intr_reporter =
1255 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
1256 if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
1257 dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
1258 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
1259 return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
1260 }
1261
1262 rvu_reporters->rvu_hw_npa_gen_reporter =
1263 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
1264 if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
1265 dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
1266 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
1267 return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
1268 }
1269
1270 rvu_reporters->rvu_hw_npa_err_reporter =
1271 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
1272 if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
1273 dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
1274 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
1275 return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
1276 }
1277
1278 rvu_reporters->rvu_hw_npa_ras_reporter =
1279 devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
1280 if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
1281 dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
1282 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
1283 return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
1284 }
1285
1286 rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
1287 if (!rvu_dl->devlink_wq)
1288 goto err;
1289
1290 INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
1291 INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
1292 INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
1293 INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
1294
1295 return 0;
1296 err:
1297 rvu_npa_health_reporters_destroy(rvu_dl);
1298 return -ENOMEM;
1299 }
1300
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1301 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1302 {
1303 struct rvu *rvu = rvu_dl->rvu;
1304 int err;
1305
1306 err = rvu_npa_register_reporters(rvu_dl);
1307 if (err) {
1308 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1309 err);
1310 return err;
1311 }
1312 rvu_npa_register_interrupts(rvu);
1313
1314 return 0;
1315 }
1316
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1317 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1318 {
1319 struct rvu_npa_health_reporters *npa_reporters;
1320 struct rvu *rvu = rvu_dl->rvu;
1321
1322 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1323
1324 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1325 return;
1326 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1327 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1328
1329 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1330 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1331
1332 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1333 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1334
1335 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1336 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1337
1338 rvu_npa_unregister_interrupts(rvu);
1339 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1340 kfree(rvu_dl->rvu_npa_health_reporter);
1341 }
1342
rvu_health_reporters_create(struct rvu * rvu)1343 static int rvu_health_reporters_create(struct rvu *rvu)
1344 {
1345 struct rvu_devlink *rvu_dl;
1346 int err;
1347
1348 rvu_dl = rvu->rvu_dl;
1349 err = rvu_npa_health_reporters_create(rvu_dl);
1350 if (err)
1351 return err;
1352
1353 return rvu_nix_health_reporters_create(rvu_dl);
1354 }
1355
rvu_health_reporters_destroy(struct rvu * rvu)1356 static void rvu_health_reporters_destroy(struct rvu *rvu)
1357 {
1358 struct rvu_devlink *rvu_dl;
1359
1360 if (!rvu->rvu_dl)
1361 return;
1362
1363 rvu_dl = rvu->rvu_dl;
1364 rvu_npa_health_reporters_destroy(rvu_dl);
1365 rvu_nix_health_reporters_destroy(rvu_dl);
1366 }
1367
1368 /* Devlink Params APIs */
/* Validate a proposed "dwrr_mtu" devlink parameter value.
 *
 * Rejects the request when the silicon lacks common DWRR MTU support, when
 * the value is neither a power of two (<= 64K) nor one of the special sizes
 * 9728/10240, or when SMQs are in use (active NIXLFs).
 *
 * Fix: the user-visible message previously read "8.16" where "8,16" was
 * intended.
 *
 * Return: 0 if the value is acceptable, negative errno otherwise.
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* Refuse the change unless every SMQ is free, i.e. no NIXLF is
	 * currently using the transmit schedulers.
	 */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1407
/* Write the "dwrr_mtu" devlink parameter to hardware. */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	/* Hardware takes DWRR MTU in its own encoding, not raw bytes */
	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
1421
/* Read the "dwrr_mtu" devlink parameter back from hardware (in bytes). */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg_val;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	reg_val = rvu_read64(rvu, BLKADDR_NIX0,
			     nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	/* Convert the hardware encoding back to bytes for the user */
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(reg_val);

	return 0;
}
1438
/* Driver-specific devlink parameter IDs, allocated above the generic range. */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
};
1445
/* Report the NPC exact-match feature state as "enabled"/"disabled". */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	const char *state;

	state = rvu_npc_exact_has_match_table(rvu_dl->rvu) ? "enabled" :
							     "disabled";
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", state);

	return 0;
}
1460
/* Setter for the exact-match param: turn the feature off (the only value
 * accepted, "1", is enforced by the validate callback).
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1471
/* Validate the exact-match disable request: the string must parse to the
 * integer 1 and the feature must still be in a disable-able state.
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 enable;

	if (kstrtoull(val.vstr, 10, &enable)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (enable != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1499
/* Report the mcam high-priority zone size as a percentage of all entries. */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1514
/* Resize the mcam high-priority zone; the low-priority zone then takes half
 * of the remaining entries at the top of the bitmap.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 high_cnt;

	high_cnt = (mcam->bmap_entries * ctx->val.vu8) / 100;
	mcam->hprio_count = high_cnt;
	mcam->hprio_end = high_cnt;
	mcam->lprio_count = (mcam->bmap_entries - high_cnt) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}
1532
/* Validate a proposed mcam high-zone percentage. */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	/* High-prio zone must span 12%..100% of the unreserved mcam space */
	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* Refuse to resize the zones once any mcam entry has been handed out */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1560
/* AF devlink parameters that are always registered (runtime cmode only). */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
1568
/* Parameters registered only when the NPC exact-match table exists
 * (CN10K-B) — see the rvu_npc_exact_has_match_table() check in
 * rvu_register_dl().
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
1583
1584 /* Devlink switch mode */
/* Report the currently configured eswitch mode (legacy or switchdev). */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	*mode = rvu_dl->rvu->rswitch.mode;

	return 0;
}
1596
/* Switch between legacy and switchdev eswitch modes. */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EINVAL;

	/* Nothing to do when the requested mode is already active */
	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1622
/* Top-level devlink ops: only eswitch mode get/set are implemented here. */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1627
/* rvu_register_dl() - allocate and register the AF devlink instance.
 * @rvu: AF driver state.
 *
 * Sets up devlink private data, health reporters, the common driver
 * parameters and — only on silicon with the NPC exact-match table — the
 * exact-match parameters, then publishes the devlink instance.
 *
 * Return: 0 on success, negative errno on failure (everything created so
 * far is torn down via the error labels below).
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link devlink private data and the AF driver state */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	/* Make the instance visible to userspace only after full setup */
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1684
rvu_unregister_dl(struct rvu * rvu)1685 void rvu_unregister_dl(struct rvu *rvu)
1686 {
1687 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1688 struct devlink *dl = rvu_dl->dl;
1689
1690 devlink_unregister(dl);
1691
1692 devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1693
1694 /* Unregister exact match devlink only for CN10K-B */
1695 if (rvu_npc_exact_has_match_table(rvu))
1696 devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1697 ARRAY_SIZE(rvu_af_dl_param_exact_match));
1698
1699 rvu_health_reporters_destroy(rvu);
1700 devlink_free(dl);
1701 }
1702