1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2025 Intel Corporation */
3 #include <linux/bitfield.h>
4 #include <linux/types.h>
5
6 #include "adf_common_drv.h"
7 #include "adf_gen6_ras.h"
8 #include "adf_sysfs_ras_counters.h"
9
/*
 * Unmask interrupt sources in the four ERRMSK registers. A cleared mask
 * bit lets the corresponding ERRSOU source raise an interrupt.
 */
static void enable_errsou_reporting(void __iomem *csr)
{
	/* Enable correctable error reporting in ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0);

	/* Enable uncorrectable error reporting in ERRSOU1 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0);

	/*
	 * Enable uncorrectable error reporting in ERRSOU2
	 * but disable PM interrupt by default
	 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT);

	/* Enable uncorrectable error reporting in ERRSOU3 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0);
}
27
/*
 * Enable error logging for every acceleration engine present in the
 * device's AE mask.
 */
static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;

	/* Enable acceleration engine correctable error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask);

	/* Enable acceleration engine uncorrectable error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
}
38
/* Enable command parity error logging for the HI CPP agents and CPP CFC. */
static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	/* Enable HI CPP agents command parity error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE,
		   ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK);

	ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK);
}
47
/*
 * Enable error reporting in the RI (ring interface) and TI (transmit
 * interface) blocks. The TI *_ERR_MASK registers use read-modify-write:
 * clearing a mask bit enables reporting for that source while other
 * mask bits are preserved.
 */
static void enable_ti_ri_error_reporting(void __iomem *csr)
{
	u32 reg, mask;

	/* Enable RI memory error reporting */
	mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask);

	/* Enable IOSF primary command parity error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT);

	/* Enable TI internal memory parity error reporting */
	reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK);
	reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg);

	reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK);
	reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg);

	reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK);
	reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg);

	reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK);
	reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg);

	reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK);
	reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg);

	/* Enable error handling in RI, TI CPP interface control registers */
	ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK);
	ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK);

	/*
	 * Enable error detection and reporting in TIMISCSTS
	 * with bits 1, 2 and 30 value preserved
	 */
	reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
	reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
	reg |= ADF_GEN6_TIMISCCTL_BIT;
	ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
}
93
/*
 * Enable SSM error interrupts. @accel_dev is unused here but keeps the
 * signature uniform with the other enable_* helpers.
 */
static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	/* Enable SSM interrupts */
	ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0);
}
100
/* Enable all RAS error sources for the device (enable_ras_errors op). */
static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr = adf_get_pmisc_base(accel_dev);

	enable_errsou_reporting(csr);
	enable_ae_error_reporting(accel_dev, csr);
	enable_cpp_error_reporting(accel_dev, csr);
	enable_ti_ri_error_reporting(csr);
	enable_ssm_error_reporting(accel_dev, csr);
}
111
/*
 * Mask interrupt sources in the ERRMSK registers. ERRMSK2 is updated
 * with read-modify-write so mask bits already set stay set.
 */
static void disable_errsou_reporting(void __iomem *csr)
{
	u32 val;

	/* Disable correctable error reporting in ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK);

	/* Disable uncorrectable error reporting in ERRSOU1 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK);

	/* Disable uncorrectable error reporting in ERRSOU2 */
	val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2);
	val |= ADF_GEN6_ERRSOU2_DIS_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val);

	/* Disable uncorrectable error reporting in ERRSOU3 */
	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK);
}
130
/* Disable error logging for all acceleration engines. */
static void disable_ae_error_reporting(void __iomem *csr)
{
	/* Disable acceleration engine correctable error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0);

	/* Disable acceleration engine uncorrectable error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0);
}
139
/* Disable command parity error logging for HI CPP agents and CPP CFC. */
static void disable_cpp_error_reporting(void __iomem *csr)
{
	/* Disable HI CPP agents command parity error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0);

	ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK);
}
147
/*
 * Disable error reporting in the RI and TI blocks; mirrors
 * enable_ti_ri_error_reporting(). Setting bits in the TI *_ERR_MASK
 * registers masks the corresponding sources.
 */
static void disable_ti_ri_error_reporting(void __iomem *csr)
{
	u32 reg;

	/* Disable RI memory error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0);

	/* Disable IOSF primary command parity error reporting */
	reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL);
	reg &= ~ADF_GEN6_RIMISCSTS_BIT;
	ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg);

	/* Disable TI internal memory parity error reporting */
	ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK);
	ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK);
	ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK);
	ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK);
	ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK);

	/* Disable error handling in RI, TI CPP interface control registers */
	reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL);
	reg &= ~ADF_GEN6_RICPPINTCTL_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg);

	reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL);
	reg &= ~ADF_GEN6_TICPPINTCTL_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg);

	/*
	 * Disable error detection and reporting in TIMISCSTS
	 * with bits 1, 2 and 30 value preserved
	 */
	reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
	reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
	ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
}
184
/* Mask all SSM error interrupt sources. */
static void disable_ssm_error_reporting(void __iomem *csr)
{
	/* Disable SSM interrupts */
	ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK);
}
190
/* Disable all RAS error sources for the device (disable_ras_errors op). */
static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr = adf_get_pmisc_base(accel_dev);

	disable_errsou_reporting(csr);
	disable_ae_error_reporting(csr);
	disable_cpp_error_reporting(csr);
	disable_ti_ri_error_reporting(csr);
	disable_ssm_error_reporting(csr);
}
201
/*
 * Handle ERRSOU0: AE correctable errors. The warning and counter update
 * are unconditional once ERRSOU0 fired, even if no bit survives the AE
 * mask; writing the logged bits back clears the interrupt source.
 */
static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ae, errsou;

	ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0);
	ae &= GET_HW_DATA(accel_dev)->ae_mask;

	dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);

	/* Clear interrupt from ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae);

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
	if (errsou & ADF_GEN6_ERRSOU0_MASK)
		dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou);
}
220
/*
 * Handle an AE uncorrectable error flagged in ERRSOU1: log it, bump the
 * uncorrectable counter and write the logged bits back to clear them.
 */
static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr,
				  u32 errsou)
{
	u32 errlog;

	if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT))
		return;

	errlog = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0) &
		 GET_HW_DATA(accel_dev)->ae_mask;
	if (!errlog)
		return;

	dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", errlog);
	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, errlog);
}
237
/*
 * Handle a HI CPP agent command parity error flagged in ERRSOU1: log it,
 * count it as fatal and write the logged bits back to clear them.
 */
static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				       u32 errsou)
{
	u32 errlog;

	if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT))
		return;

	errlog = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG) &
		 ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK;
	if (!errlog)
		return;

	dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n",
		errlog);
	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, errlog);
}
255
/*
 * Handle RI memory parity errors flagged in ERRSOU1. Correctable and
 * fatal bits are counted separately; both sets are cleared with a
 * single write-back at the end.
 */
static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				      u32 errsou)
{
	u32 rimem_parerr_sts;

	if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT))
		return;

	rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS);
	rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK |
			    ADF_GEN6_RIMEM_PARERR_FATAL_MASK;
	if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) {
		dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n",
			rimem_parerr_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
	}

	if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) {
		dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n",
			rimem_parerr_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}

	ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts);
}
281
/* Log, count (uncorrectable) and clear TI CI memory parity status. */
static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ti_ci_par_sts;

	ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS);
	ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK;
	if (ti_ci_par_sts) {
		dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts);
		ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}
}
294
/* Log, count (uncorrectable) and clear TI pull FUB parity status. */
static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ti_pullfub_par_sts;

	ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS);
	ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
	if (ti_pullfub_par_sts) {
		dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts);
		ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}
}
307
/* Log, count (uncorrectable) and clear TI push FUB parity status. */
static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ti_pushfub_par_sts;

	ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS);
	ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
	if (ti_pushfub_par_sts) {
		dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts);
	}
}
320
/* Log, count (uncorrectable) and clear TI CD parity status. */
static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ti_cd_par_sts;

	ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS);
	ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK;
	if (ti_cd_par_sts) {
		dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts);
	}
}
333
/* Log, count (uncorrectable) and clear TI TRNSB parity status. */
static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 ti_trnsb_par_sts;

	ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS);
	ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
	if (ti_trnsb_par_sts) {
		dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
	}
}
346
/*
 * Handle a command parity error on the IOSF primary interface: log it,
 * count it as fatal and write the status bit back to clear it.
 */
static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 sts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS) & ADF_GEN6_RIMISCSTS_BIT;

	if (!sts)
		return;

	dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n",
		sts);
	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, sts);
}
360
/*
 * Fan out TI internal memory parity handling when ERRSOU1 flags a TI
 * memory parity error; each helper checks and clears its own status.
 */
static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
			      u32 errsou)
{
	if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return;

	adf_handle_ti_ci_par_sts(accel_dev, csr);
	adf_handle_ti_pullfub_par_sts(accel_dev, csr);
	adf_handle_ti_pushfub_par_sts(accel_dev, csr);
	adf_handle_ti_cd_par_sts(accel_dev, csr);
	adf_handle_ti_trnsb_par_sts(accel_dev, csr);
	adf_handle_iosfp_cmd_parerr(accel_dev, csr);
}
374
/*
 * Report a streaming fabric interface command parity error as fatal.
 * No status register is read or cleared here; only ERRSOU1 is consulted.
 */
static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr,
				      u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"Command parity error detected on streaming fabric interface\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
386
/*
 * Handle all ERRSOU1 sources, then re-read ERRSOU1 and warn if any
 * source is still pending after the handlers ran.
 */
static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr,
				     u32 errsou)
{
	adf_handle_cpp_ae_unc(accel_dev, csr, errsou);
	adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou);
	adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
	adf_handle_ti_err(accel_dev, csr, errsou);
	adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou);

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
	if (errsou & ADF_GEN6_ERRSOU1_MASK)
		dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou);
}
400
/* Log, count (correctable) and clear an SSM shared memory error. */
static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 reg;

	reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH);
	reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT;
	if (reg) {
		dev_warn(&GET_DEV(accel_dev),
			 "Correctable error on ssm shared memory: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
		ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg);
	}
}
414
/*
 * Log, count (fatal) and clear an SSM shared memory uncorrectable
 * error when flagged in IAINTSTATSSM.
 */
static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr,
				 u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH);
	reg &= ADF_GEN6_UERRSSMSH_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"Fatal error on ssm shared memory: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg);
	}
}
432
/*
 * Log, count (fatal) and clear a push/pull data error when flagged
 * in IAINTSTATSSM.
 */
static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				 u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR);
	reg &= ADF_GEN6_PPERR_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"Fatal push or pull data error: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg);
	}
}
450
/*
 * Log, count (fatal) and clear an SCM parity error reported through
 * the shared SSM_FERR_STATUS register.
 */
static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				  u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
	reg &= ADF_GEN6_SCM_PAR_ERR_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
	}
}
467
/*
 * Log, count (fatal) and clear a CPP parity error reported through
 * the shared SSM_FERR_STATUS register.
 */
static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				  u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
	reg &= ADF_GEN6_CPP_PAR_ERR_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
	}
}
484
/*
 * Log, count (fatal) and clear an RF parity error reported through
 * the shared SSM_FERR_STATUS register.
 */
static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				 u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
	reg &= ADF_GEN6_RF_PAR_ERR_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev), "Fatal error on RF Parity: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
	}
}
501
/*
 * Log, count (fatal) and clear an unexpected AXI completion error
 * reported through the shared SSM_FERR_STATUS register.
 */
static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				     u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
	reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"Fatal error for AXI unexpected tag/length: %#x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
	}
}
519
/*
 * Dispatch the per-source SSM handlers according to IAINTSTATSSM, then
 * write the handled bits back to clear them.
 */
static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr)
{
	u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM);

	iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK;
	if (!iastatssm)
		return;

	adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
	adf_handle_pperr_err(accel_dev, csr, iastatssm);
	adf_handle_scmpar_err(accel_dev, csr, iastatssm);
	adf_handle_cpppar_err(accel_dev, csr, iastatssm);
	adf_handle_rfpar_err(accel_dev, csr, iastatssm);
	adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm);

	ADF_CSR_WR(csr, ADF_GEN6_IAINTSTATSSM, iastatssm);
}
537
/* Run the SSM error handlers when ERRSOU2 flags an SSM error. */
static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT) {
		adf_handle_cerrssmsh(accel_dev, csr);
		adf_handle_iaintstatssm(accel_dev, csr);
	}
}
546
/*
 * Handle CPP CFC errors flagged in ERRSOU2. Data parity is counted as
 * uncorrectable; command parity and the fatal bit are counted as fatal.
 * All status bits are cleared via the dedicated clear register.
 */
static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
				   u32 errsou)
{
	u32 reg;

	if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
		return;

	reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);
	if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}

	if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) {
		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: errors: %#x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}

	ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
		   ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);
}
574
/*
 * Handle all ERRSOU2 sources, then re-read ERRSOU2 and warn if any
 * source is still pending after the handlers ran.
 */
static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr,
				     u32 errsou)
{
	adf_handle_ssm(accel_dev, csr, errsou);
	adf_handle_cpp_cfc_err(accel_dev, csr, errsou);

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
	if (errsou & ADF_GEN6_ERRSOU2_MASK)
		dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou);
}
585
/*
 * Log and count (fatal) a transmit interface error flagged in ERRSOU3.
 * TIMISCSTS is only read here, not written back to clear.
 */
static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
				 u32 errsou)
{
	u32 timiscsts;

	if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT))
		return;

	timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS);
	if (timiscsts) {
		dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n",
			timiscsts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
601
/* Log, count (uncorrectable) and clear RI CPP push/pull error status. */
static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
				   u32 errsou)
{
	u32 ricppintsts;

	if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK))
		return;

	ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS);
	ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK;
	if (ricppintsts) {
		dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts);
	}
}
618
/* Log, count (fatal) and clear TI CPP push/pull error status. */
static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
				   u32 errsou)
{
	u32 ticppintsts;

	if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK))
		return;

	ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS);
	ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK;
	if (ticppintsts) {
		dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts);
	}
}
635
/*
 * Check the per-ring-pair ATU fault status registers; log, count
 * (uncorrectable) and clear each ring pair that reports a fault.
 */
static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr,
				      u32 errsou)
{
	u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
	u32 atufaultstatus;
	u32 i;

	if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT))
		return;

	for (i = 0; i < max_rp_num; i++) {
		atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i));

		atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT;
		if (atufaultstatus) {
			dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i,
				atufaultstatus);
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
			ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
		}
	}
}
658
/* Log, count (uncorrectable) and clear a rate limiting block error. */
static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
				u32 errsou)
{
	u32 rlterror;

	if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
		return;

	rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
	rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
	if (rlterror) {
		dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
	}
}
675
/* Report a VF-related uncorrectable error flagged in ERRSOU3. */
static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) {
		dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}
}
684
/* Report a PCIe TC/VC mapping violation as a fatal error. */
static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
				       u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) {
		dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
694
/* Report a device halt caused by an erroneous incoming transaction. */
static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
				    u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"DEVHALT due to an error in an incoming transaction\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
705
/* Report a device halt caused by a failed page request response. */
static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
				      u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"Error due to response failure in response to a page request\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
716
/* Report a fatal error on an address translation completion. */
static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
				       u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) {
		dev_err(&GET_DEV(accel_dev), "Error status for a address translation request\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
726
/* Report a device halt caused by a TI internal memory error. */
static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
					  u32 errsou)
{
	if (errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT) {
		dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	}
}
736
/*
 * Handle all ERRSOU3 sources, then re-read ERRSOU3 and warn if any
 * source is still pending after the handlers ran.
 */
static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
				     u32 errsou)
{
	adf_handle_timiscsts(accel_dev, csr, errsou);
	adf_handle_ricppintsts(accel_dev, csr, errsou);
	adf_handle_ticppintsts(accel_dev, csr, errsou);
	adf_handle_atufaultstatus(accel_dev, csr, errsou);
	adf_handle_rlterror(accel_dev, csr, errsou);
	adf_handle_vflr(accel_dev, csr, errsou);
	adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
	adf_handle_pcie_devhalt(accel_dev, csr, errsou);
	adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
	adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
	adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
	if (errsou & ADF_GEN6_ERRSOU3_MASK)
		dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
}
756
/*
 * Decide from GENSTS whether a reset is required: only a device halt
 * combined with a PFLR reset type requires one. A cold-reset type is
 * logged as fatal but does not set *reset_required.
 */
static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
				       bool *reset_required)
{
	u32 gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);
	u8 dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts);
	u8 reset_type = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts);

	*reset_required = dev_state == ADF_GEN6_GENSTS_DEVHALT &&
			  reset_type == ADF_GEN6_GENSTS_PFLR;

	/* A cold-reset type excludes PFLR, so *reset_required is false here */
	if (reset_type == ADF_GEN6_GENSTS_COLD_RESET)
		dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n");
}
776
/*
 * RAS interrupt entry point (handle_interrupt op). Reads each ERRSOU
 * register, runs its handler chain when any masked source is set, and
 * finally evaluates whether a reset is required.
 *
 * Return: true if at least one error source was processed.
 */
static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required)
{
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	bool handled = false;
	u32 errsou;

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
	if (errsou & ADF_GEN6_ERRSOU0_MASK) {
		adf_gen6_process_errsou0(accel_dev, csr);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
	if (errsou & ADF_GEN6_ERRSOU1_MASK) {
		adf_gen6_process_errsou1(accel_dev, csr, errsou);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
	if (errsou & ADF_GEN6_ERRSOU2_MASK) {
		adf_gen6_process_errsou2(accel_dev, csr, errsou);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
	if (errsou & ADF_GEN6_ERRSOU3_MASK) {
		adf_gen6_process_errsou3(accel_dev, csr, errsou);
		handled = true;
	}

	adf_gen6_is_reset_required(accel_dev, csr, reset_required);

	return handled;
}
811
/*
 * adf_gen6_init_ras_ops() - populate the RAS ops table with the gen6
 * enable/disable/interrupt handlers defined in this file.
 * @ras_ops: ops table to fill in.
 */
void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
{
	ras_ops->enable_ras_errors = adf_gen6_enable_ras;
	ras_ops->disable_ras_errors = adf_gen6_disable_ras;
	ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
}
EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops);
819