// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "core.h"

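/*
 * Notifier chain for broadcasting core device events (e.g. PDS_EVENT_RESET)
 * to registered client drivers.  A minimal usage sketch, with illustrative
 * names for the client's callback and notifier_block:
 *
 *	static int my_pds_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_pds_nb = {
 *		.notifier_call = my_pds_notify,
 *	};
 *
 *	pdsc_register_notify(&my_pds_nb);
 */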
static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);

int pdsc_register_notify(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_register_notify);

void pdsc_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);

void pdsc_notify(unsigned long event, void *data)
{
        blocking_notifier_call_chain(&pds_notify_chain, event, data);
}

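/* Release an allocated interrupt: mask and clean it in the device,
 * free the OS IRQ, then zero the tracking slot so it can be reused.
 */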
void pdsc_intr_free(struct pdsc *pdsc, int index)
{
        struct pdsc_intr_info *intr_info;

        if (index >= pdsc->nintrs || index < 0) {
                WARN(true, "bad intr index %d\n", index);
                return;
        }

        intr_info = &pdsc->intr_info[index];
        if (!intr_info->vector)
                return;
        dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
                __func__, index, intr_info->vector, intr_info->name);

        pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
        pds_core_intr_clean(&pdsc->intr_ctrl[index]);

        free_irq(intr_info->vector, intr_info->data);

        memset(intr_info, 0, sizeof(*intr_info));
}

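/* Claim the first free interrupt slot and hook up the given handler.
 * Returns the interrupt index on success or a negative errno; a slot
 * claimed on a failed attempt is released before returning.
 */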
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
                    irq_handler_t handler, void *data)
{
        struct pdsc_intr_info *intr_info;
        unsigned int index;
        int err;

        /* Find the first available interrupt */
        for (index = 0; index < pdsc->nintrs; index++)
                if (!pdsc->intr_info[index].vector)
                        break;
        if (index >= pdsc->nintrs) {
                dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
                         __func__, index, pdsc->nintrs);
                return -ENOSPC;
        }

        pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
                                  PDS_CORE_INTR_CRED_RESET_COALESCE);

        intr_info = &pdsc->intr_info[index];

        intr_info->index = index;
        intr_info->data = data;
        strscpy(intr_info->name, name, sizeof(intr_info->name));

        /* Get the OS vector number for the interrupt */
        err = pci_irq_vector(pdsc->pdev, index);
        if (err < 0) {
                dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
                        index, ERR_PTR(err));
                goto err_out_free_intr;
        }
        intr_info->vector = err;

        /* Init the device's intr mask */
        pds_core_intr_clean(&pdsc->intr_ctrl[index]);
        pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
        pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);

        /* Register the isr with a name */
        err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
        if (err) {
                dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
                        intr_info->vector, ERR_PTR(err));
                goto err_out_free_intr;
        }

        return index;

err_out_free_intr:
        pdsc_intr_free(pdsc, index);
        return err;
}

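/* Per-qcq interrupt setup/teardown: only queues flagged with
 * PDS_CORE_QCQ_F_INTR get a vector, and any such queue is serviced by
 * pdsc_adminq_isr (in this driver the adminq is the only F_INTR user).
 */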
static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
        if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
            qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
                return;

        pdsc_intr_free(pdsc, qcq->intx);
        qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
}

static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
        char name[PDSC_INTR_NAME_MAX_SZ];
        int index;

        if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
                qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
                return 0;
        }

        snprintf(name, sizeof(name), "%s-%d-%s",
                 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
        index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
        if (index < 0)
                return index;
        qcq->intx = index;
        qcq->cq.bound_intr = &pdsc->intr_info[index];

        return 0;
}

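/* Tear down a qcq in roughly the reverse order of pdsc_qcq_alloc():
 * debugfs entry, interrupt, DMA rings, info arrays, then clear the struct.
 */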
void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
        struct device *dev = pdsc->dev;

        if (!(qcq && qcq->pdsc))
                return;

        pdsc_debugfs_del_qcq(qcq);

        pdsc_qcq_intr_free(pdsc, qcq);

        if (qcq->q_base)
                dma_free_coherent(dev, qcq->q_size,
                                  qcq->q_base, qcq->q_base_pa);

        if (qcq->cq_base)
                dma_free_coherent(dev, qcq->cq_size,
                                  qcq->cq_base, qcq->cq_base_pa);

        vfree(qcq->cq.info);
        vfree(qcq->q.info);

        memset(qcq, 0, sizeof(*qcq));
}

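/* Point each q/cq info entry at its descriptor slot in the DMA ring. */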
static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
{
        struct pdsc_q_info *cur;
        unsigned int i;

        q->base = base;
        q->base_pa = base_pa;

        for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
                cur->desc = base + (i * q->desc_size);
                init_completion(&cur->completion);
        }
}

static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
{
        struct pdsc_cq_info *cur;
        unsigned int i;

        cq->base = base;
        cq->base_pa = base_pa;

        for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
                cur->comp = base + (i * cq->desc_size);
}

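/* Allocate a queue and its completion queue.  Each ring allocation is
 * padded with an extra PDS_PAGE_SIZE so that the base actually used can
 * be page-aligned with PTR_ALIGN/ALIGN.  A notifyq must carry its q and
 * cq in one contiguous allocation; see pdsc_core_init() below for the
 * adminq and notifyq callers.
 */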
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
                   const char *name, unsigned int flags, unsigned int num_descs,
                   unsigned int desc_size, unsigned int cq_desc_size,
                   unsigned int pid, struct pdsc_qcq *qcq)
{
        struct device *dev = pdsc->dev;
        void *q_base, *cq_base;
        dma_addr_t cq_base_pa;
        dma_addr_t q_base_pa;
        int err;

        qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
        if (!qcq->q.info) {
                err = -ENOMEM;
                goto err_out;
        }

        qcq->pdsc = pdsc;
        qcq->flags = flags;
        INIT_WORK(&qcq->work, pdsc_work_thread);

        qcq->q.type = type;
        qcq->q.index = index;
        qcq->q.num_descs = num_descs;
        qcq->q.desc_size = desc_size;
        qcq->q.tail_idx = 0;
        qcq->q.head_idx = 0;
        qcq->q.pid = pid;
        snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

        err = pdsc_qcq_intr_alloc(pdsc, qcq);
        if (err)
                goto err_out_free_q_info;

        qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
        if (!qcq->cq.info) {
                err = -ENOMEM;
                goto err_out_free_irq;
        }

        qcq->cq.num_descs = num_descs;
        qcq->cq.desc_size = cq_desc_size;
        qcq->cq.tail_idx = 0;
        qcq->cq.done_color = 1;

        if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
                /* q & cq need to be contiguous in case of notifyq */
                qcq->q_size = PDS_PAGE_SIZE +
                              ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
                              ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
                qcq->q_base = dma_alloc_coherent(dev,
                                                 qcq->q_size + qcq->cq_size,
                                                 &qcq->q_base_pa,
                                                 GFP_KERNEL);
                if (!qcq->q_base) {
                        err = -ENOMEM;
                        goto err_out_free_cq_info;
                }
                q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
                q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
                pdsc_q_map(&qcq->q, q_base, q_base_pa);

                cq_base = PTR_ALIGN(q_base +
                                    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
                                    PDS_PAGE_SIZE);
                cq_base_pa = ALIGN(qcq->q_base_pa +
                                   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
                                   PDS_PAGE_SIZE);

        } else {
                /* q DMA descriptors */
                qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
                qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
                                                 &qcq->q_base_pa,
                                                 GFP_KERNEL);
                if (!qcq->q_base) {
                        err = -ENOMEM;
                        goto err_out_free_cq_info;
                }
                q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
                q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
                pdsc_q_map(&qcq->q, q_base, q_base_pa);

                /* cq DMA descriptors */
                qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
                qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
                                                  &qcq->cq_base_pa,
                                                  GFP_KERNEL);
                if (!qcq->cq_base) {
                        err = -ENOMEM;
                        goto err_out_free_q;
                }
                cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
                cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
        }

        pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
        qcq->cq.bound_q = &qcq->q;

        pdsc_debugfs_add_qcq(pdsc, qcq);

        return 0;

err_out_free_q:
        dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
        vfree(qcq->cq.info);
err_out_free_irq:
        pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
        vfree(qcq->q.info);
        memset(qcq, 0, sizeof(*qcq));
err_out:
        dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
        return err;
}

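/* Undo pdsc_core_init(): free the notifyq and adminq and unmap the
 * kernel doorbell page.
 */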
static void pdsc_core_uninit(struct pdsc *pdsc)
{
        pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
        pdsc_qcq_free(pdsc, &pdsc->adminqcq);

        if (pdsc->kern_dbpage) {
                iounmap(pdsc->kern_dbpage);
                pdsc->kern_dbpage = NULL;
        }
}

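/* Set up the core queues: allocate the adminq and notifyq, pass their
 * ring addresses to the firmware through the INIT devcmd, then use the
 * returned hw indices for the doorbell values and map the kernel
 * doorbell page.
 */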
static int pdsc_core_init(struct pdsc *pdsc)
{
        union pds_core_dev_comp comp = {};
        union pds_core_dev_cmd cmd = {
                .init.opcode = PDS_CORE_CMD_INIT,
        };
        struct pds_core_dev_init_data_out cido;
        struct pds_core_dev_init_data_in cidi;
        u32 dbid_count;
        u32 dbpage_num;
        int numdescs;
        size_t sz;
        int err;

        numdescs = PDSC_ADMINQ_MAX_LENGTH;
        err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
                             PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
                             numdescs,
                             sizeof(union pds_core_adminq_cmd),
                             sizeof(union pds_core_adminq_comp),
                             0, &pdsc->adminqcq);
        if (err)
                return err;

        err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
                             PDS_CORE_QCQ_F_NOTIFYQ,
                             PDSC_NOTIFYQ_LENGTH,
                             sizeof(struct pds_core_notifyq_cmd),
                             sizeof(union pds_core_notifyq_comp),
                             0, &pdsc->notifyqcq);
        if (err)
                goto err_out_uninit;

        cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
        cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
        cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
        cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
        cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
        cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
        cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

        mutex_lock(&pdsc->devcmd_lock);

        sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
        memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

        err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
        if (!err) {
                sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
                memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
        }

        mutex_unlock(&pdsc->devcmd_lock);
        if (err) {
                dev_err(pdsc->dev, "Device init command failed: %pe\n",
                        ERR_PTR(err));
                goto err_out_uninit;
        }

        pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

        dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
        dbpage_num = pdsc->hw_index * dbid_count;
        pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
        if (!pdsc->kern_dbpage) {
                dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
                err = -ENOMEM;
                goto err_out_uninit;
        }

        pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
        pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
        pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

        pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
        pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
        pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

        pdsc->last_eid = 0;

        return 0;

err_out_uninit:
        pdsc_core_uninit(pdsc);
        return err;
}

static struct pdsc_viftype pdsc_viftype_defaults[] = {
        [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
                                 .vif_id = PDS_DEV_TYPE_FWCTL,
                                 .dl_id = -1 },
        [PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
                                .vif_id = PDS_DEV_TYPE_VDPA,
                                .dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
        [PDS_DEV_TYPE_MAX] = {}
};

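/* Copy the VIF defaults and flag each entry with the support the device
 * advertises in its identity data.
 */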
static int pdsc_viftypes_init(struct pdsc *pdsc)
{
        enum pds_core_vif_types vt;

        pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
                                       GFP_KERNEL);
        if (!pdsc->viftype_status)
                return -ENOMEM;

        for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
                bool vt_support;

                if (!pdsc_viftype_defaults[vt].name)
                        continue;

                /* Grab the defaults */
                pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];

                /* See what the Core device has for support */
                vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);

                if (vt == PDS_DEV_TYPE_FWCTL)
                        pdsc->viftype_status[vt].enabled = true;

                dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
                        pdsc->viftype_status[vt].name,
                        vt_support ? "" : "not ");

                pdsc->viftype_status[vt].supported = vt_support;
        }

        return 0;
}

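/* Bring the device up: device init, core queues, and, on first-time
 * init only, the VIF type table and its debugfs entries.
 */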
int pdsc_setup(struct pdsc *pdsc, bool init)
{
        int err;

        err = pdsc_dev_init(pdsc);
        if (err)
                return err;

        /* Set up the Core with the AdminQ and NotifyQ info */
        err = pdsc_core_init(pdsc);
        if (err)
                goto err_out_teardown;

        /* Set up the VIFs */
        if (init) {
                err = pdsc_viftypes_init(pdsc);
                if (err)
                        goto err_out_teardown;

                pdsc_debugfs_add_viftype(pdsc);
        }

        refcount_set(&pdsc->adminq_refcnt, 1);
        clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
        return 0;

err_out_teardown:
        pdsc_teardown(pdsc, init);
        return err;
}

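/* Reverse of pdsc_setup(); the VIF table survives a recovery teardown
 * and is only freed when the driver is being removed.
 */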
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
        if (!pdsc->pdev->is_virtfn)
                pdsc_devcmd_reset(pdsc);
        if (pdsc->adminqcq.work.func)
                cancel_work_sync(&pdsc->adminqcq.work);

        pdsc_core_uninit(pdsc);

        if (removing) {
                kfree(pdsc->viftype_status);
                pdsc->viftype_status = NULL;
        }

        pdsc_dev_uninit(pdsc);

        set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}

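/* Start/stop amount to unmasking or masking the in-use interrupts. */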
int pdsc_start(struct pdsc *pdsc)
{
        pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
                           PDS_CORE_INTR_MASK_CLEAR);

        return 0;
}

void pdsc_stop(struct pdsc *pdsc)
{
        int i;

        if (!pdsc->intr_info)
                return;

        /* Mask interrupts that are in use */
        for (i = 0; i < pdsc->nintrs; i++)
                if (pdsc->intr_info[i].vector)
                        pds_core_intr_mask(&pdsc->intr_ctrl[i],
                                           PDS_CORE_INTR_MASK_SET);
}

static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
{
        /* The driver initializes the adminq_refcnt to 1 when the adminq is
         * allocated and ready for use. Other users/requesters will increment
         * the refcnt while in use. If the refcnt is down to 1 then the adminq
         * is not in use and the refcnt can be cleared and adminq freed. Before
         * calling this function the driver sets PDSC_S_FW_DEAD, which causes
         * any subsequent attempt to use the adminq and increment the refcnt
         * to fail. This guarantees that this function will eventually exit.
         */
        while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
                dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
                                    __func__);
                cpu_relax();
        }
}

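/* Handle a firmware-down event: mark the state, quiesce the adminq,
 * notify the health reporter and clients, then stop and tear down in
 * recovery mode.
 */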
void pdsc_fw_down(struct pdsc *pdsc)
{
        union pds_core_notifyq_comp reset_event = {
                .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
                .reset.state = 0,
        };

        if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
                dev_warn(pdsc->dev, "%s: already happening\n", __func__);
                return;
        }

        if (pdsc->pdev->is_virtfn)
                return;

        pdsc_adminq_wait_and_dec_once_unused(pdsc);

        /* Notify clients of fw_down */
        if (pdsc->fw_reporter)
                devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
        pdsc_notify(PDS_EVENT_RESET, &reset_event);

        pdsc_stop(pdsc);
        pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

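/* Handle firmware recovery: rebuild with pdsc_setup()/pdsc_start(), then
 * notify the health reporter and clients that the device is back.
 */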
void pdsc_fw_up(struct pdsc *pdsc)
{
        union pds_core_notifyq_comp reset_event = {
                .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
                .reset.state = 1,
        };
        int err;

        if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
                dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
                return;
        }

        if (pdsc->pdev->is_virtfn) {
                clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
                return;
        }

        err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
        if (err)
                goto err_out;

        err = pdsc_start(pdsc);
        if (err)
                goto err_out;

        /* Notify clients of fw_up */
        pdsc->fw_recoveries++;
        if (pdsc->fw_reporter)
                devlink_health_reporter_state_update(pdsc->fw_reporter,
                                                     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
        pdsc_notify(PDS_EVENT_RESET, &reset_event);

        return;

err_out:
        pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

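/* PCI reset work queued from pdsc_check_pci_health(); the get/put pair
 * holds a reference on the pci_dev across the function reset.
 */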
void pdsc_pci_reset_thread(struct work_struct *work)
{
        struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
        struct pci_dev *pdev = pdsc->pdev;

        pci_dev_get(pdev);
        pci_reset_function(pdev);
        pci_dev_put(pdev);
}

static void pdsc_check_pci_health(struct pdsc *pdsc)
{
        u8 fw_status;

        /* some sort of teardown already in progress */
        if (!pdsc->info_regs)
                return;

        fw_status = ioread8(&pdsc->info_regs->fw_status);

        /* is PCI broken? */
        if (fw_status != PDS_RC_BAD_PCI)
                return;

        /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
        queue_work(pdsc->wq, &pdsc->pci_reset_work);
}

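/* Health check work: detect fw up/down transitions, check for broken
 * PCI, and track the firmware generation.
 */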
void pdsc_health_thread(struct work_struct *work)
{
        struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
        unsigned long mask;
        bool healthy;

        mutex_lock(&pdsc->config_lock);

        /* Don't do a check when in a transition state */
        mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
               BIT_ULL(PDSC_S_STOPPING_DRIVER);
        if (pdsc->state & mask)
                goto out_unlock;

        healthy = pdsc_is_fw_good(pdsc);
        dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
                __func__, healthy, pdsc->fw_status, pdsc->last_hb);

        if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
                if (healthy)
                        pdsc_fw_up(pdsc);
        } else {
                if (!healthy)
                        pdsc_fw_down(pdsc);
        }

        pdsc_check_pci_health(pdsc);

        pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;

out_unlock:
        mutex_unlock(&pdsc->config_lock);
}