1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <linux/bitfield.h>
9
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14 #include "cn20k/npc.h"
15
16 #define DRV_NAME "octeontx2-af"
17
/* Open a named pair plus object nest in the fmsg; paired with
 * rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
23
/* Close the object and pair nests opened by rvu_report_pair_start(),
 * innermost first.
 */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
29
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)30 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
31 const char *name, irq_handler_t fn)
32 {
33 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
34 int rc;
35
36 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
37 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
38 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
39 if (rc)
40 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
41 else
42 rvu->irq_allocated[offset] = true;
43
44 return rvu->irq_allocated[offset];
45 }
46
rvu_nix_intr_work(struct work_struct * work)47 static void rvu_nix_intr_work(struct work_struct *work)
48 {
49 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
50
51 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
52 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
53 "NIX_AF_RVU Error",
54 rvu_nix_health_reporter->nix_event_ctx);
55 }
56
rvu_nix_af_rvu_intr_handler(int irq,void * rvu_irq)57 static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
58 {
59 struct rvu_nix_event_ctx *nix_event_context;
60 struct rvu_devlink *rvu_dl = rvu_irq;
61 struct rvu *rvu;
62 int blkaddr;
63 u64 intr;
64
65 rvu = rvu_dl->rvu;
66 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
67 if (blkaddr < 0)
68 return IRQ_NONE;
69
70 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
71 intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
72 nix_event_context->nix_af_rvu_int = intr;
73
74 /* Clear interrupts */
75 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
76 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
77 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
78
79 return IRQ_HANDLED;
80 }
81
rvu_nix_gen_work(struct work_struct * work)82 static void rvu_nix_gen_work(struct work_struct *work)
83 {
84 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
85
86 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
87 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
88 "NIX_AF_GEN Error",
89 rvu_nix_health_reporter->nix_event_ctx);
90 }
91
rvu_nix_af_rvu_gen_handler(int irq,void * rvu_irq)92 static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
93 {
94 struct rvu_nix_event_ctx *nix_event_context;
95 struct rvu_devlink *rvu_dl = rvu_irq;
96 struct rvu *rvu;
97 int blkaddr;
98 u64 intr;
99
100 rvu = rvu_dl->rvu;
101 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
102 if (blkaddr < 0)
103 return IRQ_NONE;
104
105 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
106 intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
107 nix_event_context->nix_af_rvu_gen = intr;
108
109 /* Clear interrupts */
110 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
111 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
112 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
113
114 return IRQ_HANDLED;
115 }
116
rvu_nix_err_work(struct work_struct * work)117 static void rvu_nix_err_work(struct work_struct *work)
118 {
119 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
120
121 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
122 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
123 "NIX_AF_ERR Error",
124 rvu_nix_health_reporter->nix_event_ctx);
125 }
126
rvu_nix_af_rvu_err_handler(int irq,void * rvu_irq)127 static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
128 {
129 struct rvu_nix_event_ctx *nix_event_context;
130 struct rvu_devlink *rvu_dl = rvu_irq;
131 struct rvu *rvu;
132 int blkaddr;
133 u64 intr;
134
135 rvu = rvu_dl->rvu;
136 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
137 if (blkaddr < 0)
138 return IRQ_NONE;
139
140 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
141 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
142 nix_event_context->nix_af_rvu_err = intr;
143
144 /* Clear interrupts */
145 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
146 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
147 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
148
149 return IRQ_HANDLED;
150 }
151
rvu_nix_ras_work(struct work_struct * work)152 static void rvu_nix_ras_work(struct work_struct *work)
153 {
154 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
155
156 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
157 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
158 "NIX_AF_RAS Error",
159 rvu_nix_health_reporter->nix_event_ctx);
160 }
161
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)162 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
163 {
164 struct rvu_nix_event_ctx *nix_event_context;
165 struct rvu_devlink *rvu_dl = rvu_irq;
166 struct rvu *rvu;
167 int blkaddr;
168 u64 intr;
169
170 rvu = rvu_dl->rvu;
171 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
172 if (blkaddr < 0)
173 return IRQ_NONE;
174
175 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
176 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
177 nix_event_context->nix_af_rvu_ras = intr;
178
179 /* Clear interrupts */
180 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
181 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
182 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
183
184 return IRQ_HANDLED;
185 }
186
rvu_nix_unregister_interrupts(struct rvu * rvu)187 static void rvu_nix_unregister_interrupts(struct rvu *rvu)
188 {
189 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
190 int offs, i, blkaddr;
191
192 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
193 if (blkaddr < 0)
194 return;
195
196 offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
197 if (!offs)
198 return;
199
200 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
201 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
202 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
203 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
204
205 if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
206 free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
207 rvu_dl);
208 rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
209 }
210
211 for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
212 if (rvu->irq_allocated[offs + i]) {
213 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
214 rvu->irq_allocated[offs + i] = false;
215 }
216 }
217
rvu_nix_register_interrupts(struct rvu * rvu)218 static int rvu_nix_register_interrupts(struct rvu *rvu)
219 {
220 int blkaddr, base;
221 bool rc;
222
223 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
224 if (blkaddr < 0)
225 return blkaddr;
226
227 /* Get NIX AF MSIX vectors offset. */
228 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
229 if (!base) {
230 dev_warn(rvu->dev,
231 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
232 blkaddr - BLKADDR_NIX0);
233 return 0;
234 }
235 /* Register and enable NIX_AF_RVU_INT interrupt */
236 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
237 "NIX_AF_RVU_INT",
238 rvu_nix_af_rvu_intr_handler);
239 if (!rc)
240 goto err;
241 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
242
243 /* Register and enable NIX_AF_GEN_INT interrupt */
244 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
245 "NIX_AF_GEN_INT",
246 rvu_nix_af_rvu_gen_handler);
247 if (!rc)
248 goto err;
249 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
250
251 /* Register and enable NIX_AF_ERR_INT interrupt */
252 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
253 "NIX_AF_ERR_INT",
254 rvu_nix_af_rvu_err_handler);
255 if (!rc)
256 goto err;
257 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
258
259 /* Register and enable NIX_AF_RAS interrupt */
260 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
261 "NIX_AF_RAS",
262 rvu_nix_af_rvu_ras_handler);
263 if (!rc)
264 goto err;
265 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
266
267 return 0;
268 err:
269 rvu_nix_unregister_interrupts(rvu);
270 return rc;
271 }
272
/* Decode one latched NIX AF interrupt cause register from @ctx into a
 * human-readable devlink fmsg dump.
 *
 * @fmsg:            devlink formatted-message buffer to append to
 * @ctx:             struct rvu_nix_event_ctx holding the latched cause values
 * @health_reporter: selects which of the four cause registers to decode
 *
 * Returns 0 on success, -EINVAL for an unknown reporter id.
 *
 * NOTE(review): the bit-number -> description mapping below is taken as-is
 * from the original code; it presumably mirrors the NIX AF register layout
 * in the hardware reference manual — confirm against the HRM before editing.
 */
static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum nix_af_rvu_health health_reporter)
{
	struct rvu_nix_event_ctx *nix_event_context;
	u64 intr_val;

	nix_event_context = ctx;
	switch (health_reporter) {
	case NIX_AF_RVU_INTR:
		/* NIX_AF_RVU_INT: slot-mapping errors */
		intr_val = nix_event_context->nix_af_rvu_int;
		rvu_report_pair_start(fmsg, "NIX_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
					  nix_event_context->nix_af_rvu_int);
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_GEN:
		/* NIX_AF_GEN_INT: multicast/mirror drops and SMQ flush */
		intr_val = nix_event_context->nix_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
					  nix_event_context->nix_af_rvu_gen);
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_ERR:
		/* NIX_AF_ERR_INT: AQ faults, doorbell errors, rx faults */
		intr_val = nix_event_context->nix_af_rvu_err;
		rvu_report_pair_start(fmsg, "NIX_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
					  nix_event_context->nix_af_rvu_err);
		if (intr_val & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
		if (intr_val & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
		if (intr_val & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		if (intr_val & BIT_ULL(6))
			devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
		if (intr_val & BIT_ULL(5))
			devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
		if (intr_val & BIT_ULL(3))
			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
		if (intr_val & BIT_ULL(2))
			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_RAS:
		/* NIX_AF_RAS: poisoned-data (RAS) events */
		intr_val = nix_event_context->nix_af_rvu_ras;
		rvu_report_pair_start(fmsg, "NIX_AF_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
					  nix_event_context->nix_af_rvu_ras);
		devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
		if (intr_val & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
		if (intr_val & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
		if (intr_val & BIT_ULL(3))
			devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
		if (intr_val & BIT_ULL(2))
			devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
360
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)361 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
362 struct devlink_fmsg *fmsg, void *ctx,
363 struct netlink_ext_ack *netlink_extack)
364 {
365 struct rvu *rvu = devlink_health_reporter_priv(reporter);
366 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
367 struct rvu_nix_event_ctx *nix_ctx;
368
369 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
370
371 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
372 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
373 }
374
rvu_hw_nix_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)375 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
376 void *ctx, struct netlink_ext_ack *netlink_extack)
377 {
378 struct rvu *rvu = devlink_health_reporter_priv(reporter);
379 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
380 int blkaddr;
381
382 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
383 if (blkaddr < 0)
384 return blkaddr;
385
386 if (nix_event_ctx->nix_af_rvu_int)
387 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
388
389 return 0;
390 }
391
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)392 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
393 struct devlink_fmsg *fmsg, void *ctx,
394 struct netlink_ext_ack *netlink_extack)
395 {
396 struct rvu *rvu = devlink_health_reporter_priv(reporter);
397 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
398 struct rvu_nix_event_ctx *nix_ctx;
399
400 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
401
402 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
403 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
404 }
405
rvu_hw_nix_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)406 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
407 void *ctx, struct netlink_ext_ack *netlink_extack)
408 {
409 struct rvu *rvu = devlink_health_reporter_priv(reporter);
410 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
411 int blkaddr;
412
413 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
414 if (blkaddr < 0)
415 return blkaddr;
416
417 if (nix_event_ctx->nix_af_rvu_gen)
418 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
419
420 return 0;
421 }
422
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)423 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
424 struct devlink_fmsg *fmsg, void *ctx,
425 struct netlink_ext_ack *netlink_extack)
426 {
427 struct rvu *rvu = devlink_health_reporter_priv(reporter);
428 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
429 struct rvu_nix_event_ctx *nix_ctx;
430
431 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
432
433 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
434 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
435 }
436
rvu_hw_nix_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)437 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
438 void *ctx, struct netlink_ext_ack *netlink_extack)
439 {
440 struct rvu *rvu = devlink_health_reporter_priv(reporter);
441 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
442 int blkaddr;
443
444 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
445 if (blkaddr < 0)
446 return blkaddr;
447
448 if (nix_event_ctx->nix_af_rvu_err)
449 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
450
451 return 0;
452 }
453
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)454 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
455 struct devlink_fmsg *fmsg, void *ctx,
456 struct netlink_ext_ack *netlink_extack)
457 {
458 struct rvu *rvu = devlink_health_reporter_priv(reporter);
459 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
460 struct rvu_nix_event_ctx *nix_ctx;
461
462 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
463
464 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
465 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
466 }
467
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)468 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
469 void *ctx, struct netlink_ext_ack *netlink_extack)
470 {
471 struct rvu *rvu = devlink_health_reporter_priv(reporter);
472 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
473 int blkaddr;
474
475 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
476 if (blkaddr < 0)
477 return blkaddr;
478
479 if (nix_event_ctx->nix_af_rvu_ras)
480 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
481
482 return 0;
483 }
484
485 RVU_REPORTERS(hw_nix_intr);
486 RVU_REPORTERS(hw_nix_gen);
487 RVU_REPORTERS(hw_nix_err);
488 RVU_REPORTERS(hw_nix_ras);
489
490 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
491
/* Allocate the NIX health-reporter bundle, create the four devlink health
 * reporters (intr/gen/err/ras), the shared workqueue, and the deferred-work
 * items used by the IRQ handlers.
 *
 * Returns 0 on success, -ENOMEM or a devlink error code on failure.
 *
 * NOTE(review): on the error paths nothing allocated so far is freed here;
 * partially created objects are left hanging off rvu_dl and presumably
 * reclaimed by rvu_nix_health_reporters_destroy() later — confirm the caller
 * actually invokes it on failure, otherwise these paths leak.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc_obj(*rvu_reporters);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the bundle before filling it so later error paths can
	 * reach it through rvu_dl.
	 */
	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc_obj(*nix_event_context);
	if (!nix_event_context)
		return -ENOMEM;

	/* Single event context shared by all four reporters. */
	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue that the hard-IRQ handlers queue their reports on. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
559
rvu_nix_health_reporters_create(struct rvu_devlink * rvu_dl)560 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
561 {
562 struct rvu *rvu = rvu_dl->rvu;
563 int err;
564
565 err = rvu_nix_register_reporters(rvu_dl);
566 if (err) {
567 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
568 err);
569 return err;
570 }
571 rvu_nix_register_interrupts(rvu);
572
573 return 0;
574 }
575
/* Tear down the NIX health reporters, their interrupts, and the event
 * context allocated by rvu_nix_register_reporters().
 *
 * NOTE(review): this dereferences rvu_dl->rvu_nix_health_reporter without a
 * NULL check — callers presumably only invoke it after the bundle was
 * allocated; confirm.
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	/* ras_reporter is created last; if it is still NULL, creation never
	 * completed and (per this guard) nothing is destroyed or freed here.
	 */
	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
601
rvu_npa_intr_work(struct work_struct * work)602 static void rvu_npa_intr_work(struct work_struct *work)
603 {
604 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
605
606 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
607 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
608 "NPA_AF_RVU Error",
609 rvu_npa_health_reporter->npa_event_ctx);
610 }
611
rvu_npa_af_rvu_intr_handler(int irq,void * rvu_irq)612 static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
613 {
614 struct rvu_npa_event_ctx *npa_event_context;
615 struct rvu_devlink *rvu_dl = rvu_irq;
616 struct rvu *rvu;
617 int blkaddr;
618 u64 intr;
619
620 rvu = rvu_dl->rvu;
621 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
622 if (blkaddr < 0)
623 return IRQ_NONE;
624
625 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
626 intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
627 npa_event_context->npa_af_rvu_int = intr;
628
629 /* Clear interrupts */
630 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
631 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
632 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
633
634 return IRQ_HANDLED;
635 }
636
rvu_npa_gen_work(struct work_struct * work)637 static void rvu_npa_gen_work(struct work_struct *work)
638 {
639 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
640
641 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
642 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
643 "NPA_AF_GEN Error",
644 rvu_npa_health_reporter->npa_event_ctx);
645 }
646
rvu_npa_af_gen_intr_handler(int irq,void * rvu_irq)647 static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
648 {
649 struct rvu_npa_event_ctx *npa_event_context;
650 struct rvu_devlink *rvu_dl = rvu_irq;
651 struct rvu *rvu;
652 int blkaddr;
653 u64 intr;
654
655 rvu = rvu_dl->rvu;
656 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
657 if (blkaddr < 0)
658 return IRQ_NONE;
659
660 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
661 intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
662 npa_event_context->npa_af_rvu_gen = intr;
663
664 /* Clear interrupts */
665 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
666 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
667 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
668
669 return IRQ_HANDLED;
670 }
671
rvu_npa_err_work(struct work_struct * work)672 static void rvu_npa_err_work(struct work_struct *work)
673 {
674 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
675
676 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
677 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
678 "NPA_AF_ERR Error",
679 rvu_npa_health_reporter->npa_event_ctx);
680 }
681
rvu_npa_af_err_intr_handler(int irq,void * rvu_irq)682 static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
683 {
684 struct rvu_npa_event_ctx *npa_event_context;
685 struct rvu_devlink *rvu_dl = rvu_irq;
686 struct rvu *rvu;
687 int blkaddr;
688 u64 intr;
689
690 rvu = rvu_dl->rvu;
691 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
692 if (blkaddr < 0)
693 return IRQ_NONE;
694 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
695 intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
696 npa_event_context->npa_af_rvu_err = intr;
697
698 /* Clear interrupts */
699 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
700 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
701 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
702
703 return IRQ_HANDLED;
704 }
705
rvu_npa_ras_work(struct work_struct * work)706 static void rvu_npa_ras_work(struct work_struct *work)
707 {
708 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
709
710 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
711 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
712 "HW NPA_AF_RAS Error reported",
713 rvu_npa_health_reporter->npa_event_ctx);
714 }
715
rvu_npa_af_ras_intr_handler(int irq,void * rvu_irq)716 static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
717 {
718 struct rvu_npa_event_ctx *npa_event_context;
719 struct rvu_devlink *rvu_dl = rvu_irq;
720 struct rvu *rvu;
721 int blkaddr;
722 u64 intr;
723
724 rvu = rvu_dl->rvu;
725 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
726 if (blkaddr < 0)
727 return IRQ_NONE;
728
729 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
730 intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
731 npa_event_context->npa_af_rvu_ras = intr;
732
733 /* Clear interrupts */
734 rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
735 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
736 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
737
738 return IRQ_HANDLED;
739 }
740
rvu_npa_unregister_interrupts(struct rvu * rvu)741 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
742 {
743 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
744 int i, offs, blkaddr;
745 u64 reg;
746
747 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
748 if (blkaddr < 0)
749 return;
750
751 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
752 offs = reg & 0x3FF;
753
754 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
755 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
756 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
757 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
758
759 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
760 if (rvu->irq_allocated[offs + i]) {
761 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
762 rvu->irq_allocated[offs + i] = false;
763 }
764 }
765
rvu_npa_register_interrupts(struct rvu * rvu)766 static int rvu_npa_register_interrupts(struct rvu *rvu)
767 {
768 int blkaddr, base;
769 bool rc;
770
771 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
772 if (blkaddr < 0)
773 return blkaddr;
774
775 /* Get NPA AF MSIX vectors offset. */
776 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
777 if (!base) {
778 dev_warn(rvu->dev,
779 "Failed to get NPA_AF_INT vector offsets\n");
780 return 0;
781 }
782
783 /* Register and enable NPA_AF_RVU_INT interrupt */
784 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
785 "NPA_AF_RVU_INT",
786 rvu_npa_af_rvu_intr_handler);
787 if (!rc)
788 goto err;
789 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
790
791 /* Register and enable NPA_AF_GEN_INT interrupt */
792 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
793 "NPA_AF_RVU_GEN",
794 rvu_npa_af_gen_intr_handler);
795 if (!rc)
796 goto err;
797 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
798
799 /* Register and enable NPA_AF_ERR_INT interrupt */
800 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
801 "NPA_AF_ERR_INT",
802 rvu_npa_af_err_intr_handler);
803 if (!rc)
804 goto err;
805 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
806
807 /* Register and enable NPA_AF_RAS interrupt */
808 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
809 "NPA_AF_RAS",
810 rvu_npa_af_ras_intr_handler);
811 if (!rc)
812 goto err;
813 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
814
815 return 0;
816 err:
817 rvu_npa_unregister_interrupts(rvu);
818 return rc;
819 }
820
/* Decode one latched NPA AF interrupt cause register from @ctx into a
 * human-readable devlink fmsg dump.
 *
 * @fmsg:            devlink formatted-message buffer to append to
 * @ctx:             struct rvu_npa_event_ctx holding the latched cause values
 * @health_reporter: selects which of the four cause registers to decode
 *
 * Returns 0 on success, -EINVAL for an unknown reporter id.
 *
 * NOTE(review): the bit-number -> description mapping below is taken as-is
 * from the original code; it presumably mirrors the NPA AF register layout
 * in the hardware reference manual — confirm against the HRM before editing.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		/* NPA_AF_GEN_INT: unmap errors plus per-input-queue
		 * free/alloc-disabled bitmaps (free: bits 15:0,
		 * alloc: bits 31:16).
		 */
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		/* NPA_AF_ERR_INT: AQ faults and doorbell errors */
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		/* NPA_AF_RAS: poisoned-data (RAS) events */
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		/* NPA_AF_RVU_INT: slot-mapping errors */
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
914
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)915 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
916 struct devlink_fmsg *fmsg, void *ctx,
917 struct netlink_ext_ack *netlink_extack)
918 {
919 struct rvu *rvu = devlink_health_reporter_priv(reporter);
920 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
921 struct rvu_npa_event_ctx *npa_ctx;
922
923 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
924
925 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
926 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
927 }
928
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)929 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
930 void *ctx, struct netlink_ext_ack *netlink_extack)
931 {
932 struct rvu *rvu = devlink_health_reporter_priv(reporter);
933 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
934 int blkaddr;
935
936 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
937 if (blkaddr < 0)
938 return blkaddr;
939
940 if (npa_event_ctx->npa_af_rvu_int)
941 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
942
943 return 0;
944 }
945
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)946 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
947 struct devlink_fmsg *fmsg, void *ctx,
948 struct netlink_ext_ack *netlink_extack)
949 {
950 struct rvu *rvu = devlink_health_reporter_priv(reporter);
951 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
952 struct rvu_npa_event_ctx *npa_ctx;
953
954 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
955
956 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
957 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
958 }
959
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)960 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
961 void *ctx, struct netlink_ext_ack *netlink_extack)
962 {
963 struct rvu *rvu = devlink_health_reporter_priv(reporter);
964 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
965 int blkaddr;
966
967 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
968 if (blkaddr < 0)
969 return blkaddr;
970
971 if (npa_event_ctx->npa_af_rvu_gen)
972 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
973
974 return 0;
975 }
976
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)977 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
978 struct devlink_fmsg *fmsg, void *ctx,
979 struct netlink_ext_ack *netlink_extack)
980 {
981 struct rvu *rvu = devlink_health_reporter_priv(reporter);
982 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
983 struct rvu_npa_event_ctx *npa_ctx;
984
985 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
986
987 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
988 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
989 }
990
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)991 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
992 void *ctx, struct netlink_ext_ack *netlink_extack)
993 {
994 struct rvu *rvu = devlink_health_reporter_priv(reporter);
995 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
996 int blkaddr;
997
998 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
999 if (blkaddr < 0)
1000 return blkaddr;
1001
1002 if (npa_event_ctx->npa_af_rvu_err)
1003 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1004
1005 return 0;
1006 }
1007
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1008 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1009 struct devlink_fmsg *fmsg, void *ctx,
1010 struct netlink_ext_ack *netlink_extack)
1011 {
1012 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1013 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1014 struct rvu_npa_event_ctx *npa_ctx;
1015
1016 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1017
1018 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1019 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1020 }
1021
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1022 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1023 void *ctx, struct netlink_ext_ack *netlink_extack)
1024 {
1025 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1026 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1027 int blkaddr;
1028
1029 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1030 if (blkaddr < 0)
1031 return blkaddr;
1032
1033 if (npa_event_ctx->npa_af_rvu_ras)
1034 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1035
1036 return 0;
1037 }
1038
/* Instantiate the devlink health reporter ops for each NPA reporter.
 * RVU_REPORTERS() is defined elsewhere in the driver — presumably it
 * ties the rvu_hw_<name>_dump/_recover callbacks above into a
 * rvu_hw_<name>_reporter_ops struct (those ops names are referenced in
 * rvu_npa_register_reporters() below) — confirm against the macro
 * definition.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration of the NPA teardown helper defined further down. */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1045
/* Allocate the per-AF NPA health-reporter state and register the four
 * NPA devlink health reporters (intr, gen, err, ras) together with the
 * workqueue their interrupt handlers use to queue reports.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on a mid-function failure this returns without undoing
 * the earlier steps; cleanup appears to rely on the caller's error path
 * eventually invoking rvu_npa_health_reporters_destroy() — confirm.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc_obj(*rvu_reporters);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the struct before the remaining allocations so the
	 * destroy path can find (and free) whatever was set up.
	 */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc_obj(*npa_event_context);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Workqueue on which the NPA interrupt handlers queue the
	 * devlink_health_report() work items.
	 */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1113
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1114 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1115 {
1116 struct rvu *rvu = rvu_dl->rvu;
1117 int err;
1118
1119 err = rvu_npa_register_reporters(rvu_dl);
1120 if (err) {
1121 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1122 err);
1123 return err;
1124 }
1125 rvu_npa_register_interrupts(rvu);
1126
1127 return 0;
1128 }
1129
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1130 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1131 {
1132 struct rvu_npa_health_reporters *npa_reporters;
1133 struct rvu *rvu = rvu_dl->rvu;
1134
1135 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1136
1137 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1138 return;
1139 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1140 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1141
1142 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1143 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1144
1145 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1146 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1147
1148 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1149 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1150
1151 rvu_npa_unregister_interrupts(rvu);
1152 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1153 kfree(rvu_dl->rvu_npa_health_reporter);
1154 }
1155
rvu_health_reporters_create(struct rvu * rvu)1156 static int rvu_health_reporters_create(struct rvu *rvu)
1157 {
1158 struct rvu_devlink *rvu_dl;
1159 int err;
1160
1161 rvu_dl = rvu->rvu_dl;
1162 err = rvu_npa_health_reporters_create(rvu_dl);
1163 if (err)
1164 return err;
1165
1166 return rvu_nix_health_reporters_create(rvu_dl);
1167 }
1168
rvu_health_reporters_destroy(struct rvu * rvu)1169 static void rvu_health_reporters_destroy(struct rvu *rvu)
1170 {
1171 struct rvu_devlink *rvu_dl;
1172
1173 if (!rvu->rvu_dl)
1174 return;
1175
1176 rvu_dl = rvu->rvu_dl;
1177 rvu_npa_health_reporters_destroy(rvu_dl);
1178 rvu_nix_health_reporters_destroy(rvu_dl);
1179 }
1180
1181 /* Devlink Params APIs */
/* Devlink "validate" callback for the dwrr_mtu parameter.
 *
 * Rejects the request when:
 *  - the silicon has no common DWRR MTU register;
 *  - the value is neither a power of two <= 64K nor one of the two
 *    special MTUs 9728 / 10240;
 *  - any SMQ scheduler queue is in use (i.e. a NIXLF is active), since
 *    changing the DWRR MTU under active traffic is unsupported.
 *
 * Fix vs. original: the user-visible message listed "8.16" where
 * "8,16" was meant.
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* All SMQs free means no NIXLF has been initialized yet. */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1220
/* Devlink "set" callback: program the RPM-link DWRR MTU register on
 * NIX0 with the hardware encoding of the requested byte value.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg_val;

	reg_val = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), reg_val);

	return 0;
}
1235
/* Devlink "get" callback: read back the RPM-link DWRR MTU register on
 * NIX0 and convert the hardware encoding to bytes.
 */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg_val;

	/* Silicon without a common DWRR MTU register cannot report one. */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	reg_val = rvu_read64(rvu, BLKADDR_NIX0,
			     nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(reg_val);

	return 0;
}
1253
/* Driver-specific devlink parameter IDs. Based at
 * DEVLINK_PARAM_GENERIC_ID_MAX so they never collide with the generic
 * devlink parameter ID space.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_DEFRAG,
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
1263
/* Devlink "get" callback for npc_defrag: reports "enabled" on CN20K
 * silicon (the only silicon supporting NPC defragmentation),
 * "disabled" otherwise.
 */
static int rvu_af_npc_defrag_feature_get(struct devlink *devlink, u32 id,
					 struct devlink_param_gset_ctx *ctx,
					 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 is_cn20k(rvu_dl->rvu->pdev) ? "enabled" : "disabled");

	return 0;
}
1279
/* Devlink "set" callback for npc_defrag: kicks off NPC MCAM
 * defragmentation (validated to run only on CN20K silicon).
 */
static int rvu_af_npc_defrag(struct devlink *devlink, u32 id,
			     struct devlink_param_gset_ctx *ctx,
			     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	/* Defragmentation is hard to roll back once it has partially run,
	 * so on failure just log an error and report -EFAULT.
	 */
	if (npc_cn20k_defrag(rvu)) {
		dev_err(rvu->dev, "Defrag process failed\n");
		return -EFAULT;
	}
	return 0;
}
1296
/* Devlink "validate" callback for npc_defrag: the only accepted value
 * is the string "1" (initiate defrag), and only on CN20K silicon.
 */
static int rvu_af_npc_defrag_feature_validate(struct devlink *devlink, u32 id,
					      union devlink_param_value val,
					      struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 req;

	if (kstrtoull(val.vstr, 10, &req)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (req != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only initiating defrag is supported");
		return -EINVAL;
	}

	if (!is_cn20k(rvu->pdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can defrag NPC only in cn20k silicon");
		return -EFAULT;
	}

	return 0;
}
1324
/* Devlink "get" callback for npc_exact_feature_disable: reports whether
 * the NPC exact-match table is currently in use.
 */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 rvu_npc_exact_has_match_table(rvu_dl->rvu) ?
		 "enabled" : "disabled");

	return 0;
}
1340
/* Devlink "set" callback for npc_exact_feature_disable: turns off the
 * NPC exact-match feature (the validate callback has already confirmed
 * this is allowed).
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1352
/* Devlink "validate" callback for npc_exact_feature_disable: the only
 * accepted value is the string "1", and disabling must still be
 * possible (i.e. nothing has been configured in the table yet).
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 req;

	if (kstrtoull(val.vstr, 10, &req)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (req != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1380
/* Devlink "get" callback: report the high-priority MCAM zone size as a
 * percentage of all bitmap entries.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1396
/* Devlink "set" callback: re-partition the MCAM priority zones from the
 * requested high-zone percentage.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 hzone_percent = ctx->val.vu8;

	/* High-prio zone occupies the first hprio_count entries... */
	mcam->hprio_count = (mcam->bmap_entries * hzone_percent) / 100;
	mcam->hprio_end = mcam->hprio_count;
	/* ...low-prio zone takes half the remainder, from the tail. */
	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}
1415
/* Devlink "validate" callback for npc_mcam_high_zone_percent.
 *
 * The high-prio zone must cover 12%..100% of the unreserved MCAM space,
 * and resizing is only permitted while every MCAM entry is still free.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* bmap_fcnt < bmap_entries means some entries are handed out. */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1443
/* Devlink "get" callback: report whether counters on NPC default rules
 * are currently enabled.
 */
static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vbool = rvu_dl->rvu->def_rule_cntr_en;

	return 0;
}
1455
/* Devlink "set" callback: enable/disable counters on NPC default rules.
 * The cached state is updated only when the hardware update succeeds.
 */
static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int rc;

	rc = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
	if (!rc)
		rvu->def_rule_cntr_en = ctx->val.vbool;

	return rc;
}
1470
/* Devlink "get" callback: report the current NIX LF count, truncated to
 * the u16 devlink parameter type.
 */
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu_dl->rvu);

	return 0;
}
1482
/* Devlink "set" callback for nix_maxlf: apply the new maximum LF count
 * to every NIX block.
 *
 * The NPC MCAM resources are torn down first and re-initialized after
 * the limits are updated — presumably because MCAM sizing depends on
 * the NIX LF count; confirm against npc_mcam_rsrcs_init().
 */
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_block *block;
	int blkaddr = 0;

	npc_mcam_rsrcs_deinit(rvu);
	/* Walk every NIX block (NIX0, NIX1, ...) and apply the new limit;
	 * rvu_get_next_nix_blkaddr() returns 0 when there are no more.
	 */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		block->lf.max = ctx->val.vu16;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}
1505
/* Devlink "validate" callback for nix_maxlf.
 *
 * Rejects the request when MCAM entries are already handed out (the
 * MCAM must be resized along with the LF count) or when the requested
 * count exceeds the hardware limit of either NIX block.
 *
 * Fix vs. original: the nix0 and nix1 limit checks are semantically
 * identical but returned different error codes (-EPERM vs -EINVAL);
 * both now return -EPERM, consistent with the MCAM check above.
 */
static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
					union devlink_param_value val,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u16 max_nix0_lf, max_nix1_lf;
	struct npc_mcam *mcam;
	u64 cfg;

	/* Hardware maximum LF count lives in the low 12 bits of
	 * NIX_AF_CONST2 of each NIX block.
	 */
	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	max_nix0_lf = cfg & 0xFFF;
	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
	max_nix1_lf = cfg & 0xFFF;

	/* Do not allow user to modify maximum NIX LFs while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix0_lf");
		return -EPERM;
	}

	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix1_lf");
		return -EPERM;
	}

	return 0;
}
1545
/* Runtime devlink parameters always exposed by the AF driver. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	/* No validate callback: any bool is acceptable. */
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
			     "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_def_rule_cntr_get,
			     rvu_af_dl_npc_def_rule_cntr_set, NULL),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};
1570
/* Exact-match parameter, registered only when the silicon has an NPC
 * exact-match table (CN10K-B; see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};
1579
1580 /* Devlink switch mode */
/* Devlink eswitch mode "get": report the current switch mode.
 * Not supported while the driver runs in rep mode.
 */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	if (rvu->rep_mode)
		return -EOPNOTSUPP;

	*mode = rvu->rswitch.mode;

	return 0;
}
1595
/* Devlink eswitch mode "set": switch between legacy and switchdev mode,
 * enabling/disabling the RVU switch accordingly. Setting the mode that
 * is already active is a no-op.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EINVAL;

	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1621
/* Devlink ops for the AF: only the eswitch mode callbacks are provided;
 * health reporters and params are registered separately.
 */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1626
/* NPC defrag parameter, registered only on CN20K silicon
 * (see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_defrag[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEFRAG,
			     "npc_defrag", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_defrag_feature_get,
			     rvu_af_npc_defrag,
			     rvu_af_npc_defrag_feature_validate),
};
1635
/* Register the AF devlink instance: allocate it, create the health
 * reporters, register the parameter sets (defrag only on CN20K,
 * exact-match only when the silicon has a match table) and finally
 * publish the devlink instance.
 *
 * On failure, unwinds in reverse order via the goto ladder below and
 * returns a negative errno.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Wire up the back-pointers between rvu and its devlink priv. */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* npc_defrag is meaningful only on CN20K silicon. */
	if (is_cn20k(rvu->pdev)) {
		err = devlink_params_register(dl, rvu_af_dl_param_defrag,
					      ARRAY_SIZE(rvu_af_dl_param_defrag));
		if (err) {
			dev_err(rvu->dev,
				"devlink defrag params register failed with error %d",
				err);
			goto err_dl_defrag;
		}
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d",
			err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

	/* Unwind in the reverse order of registration. */
err_dl_exact_match:
	if (is_cn20k(rvu->pdev))
		devlink_params_unregister(dl, rvu_af_dl_param_defrag,
					  ARRAY_SIZE(rvu_af_dl_param_defrag));

err_dl_defrag:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1709
/* Unregister and free the AF devlink instance, undoing everything
 * rvu_register_dl() set up (params are unregistered only for the
 * silicon variants on which they were registered).
 */
void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct devlink *dl = rvu_dl->dl;

	devlink_unregister(dl);

	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

	/* npc_defrag param exists only on CN20K. */
	if (is_cn20k(rvu->pdev))
		devlink_params_unregister(dl, rvu_af_dl_param_defrag,
					  ARRAY_SIZE(rvu_af_dl_param_defrag));

	/* Unregister exact match devlink only for CN10K-B */
	if (rvu_npc_exact_has_match_table(rvu))
		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
					  ARRAY_SIZE(rvu_af_dl_param_exact_match));

	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
}
1731