// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;

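/*
 * Optionally register a list of "target implementation" CPUs
 * (MIDR/REVIDR pairs) supplied by the caller, e.g. for a guest that
 * may later run on hosts with different CPU implementations. Once
 * registered, erratum matching is done against this list instead of
 * the CPU we are currently running on. The list can only be set once
 * and cannot be cleared.
 */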
bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
{
	if (target_impl_cpu_num || !num || !impl_cpus)
		return false;

	target_impl_cpu_num = num;
	target_impl_cpus = impl_cpus;
	return true;
}

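/*
 * Check a single MIDR range either against the local CPU or, if a
 * target implementation list has been registered, against every MIDR
 * in that list.
 */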
static inline bool is_midr_in_range(struct midr_range const *range)
{
	int i;

	if (!target_impl_cpu_num)
		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
					       range->rv_min, range->rv_max);

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
					    range->model,
					    range->rv_min, range->rv_max))
			return true;
	}
	return false;
}

bool is_midr_in_range_list(struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(ranges++))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(is_midr_in_range_list);

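/*
 * A CPU is affected by an entry if its MIDR falls inside the entry's
 * range and its variant/revision is not listed in ->fixed_revs with a
 * REVIDR bit set, which would indicate the erratum is fixed in that
 * particular part.
 */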
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;
	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	int i;

	if (!target_impl_cpu_num) {
		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
		return __is_affected_midr_range(entry, read_cpuid_id(),
						read_cpuid(REVIDR_EL1));
	}

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
					     target_impl_cpus[i].revidr))
			return true;
	}
	return false;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

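/*
 * Qualcomm Kryo parts are matched on implementer, architecture and the
 * top nibble of the part number only, since the lower part-number bits
 * vary between Kryo variants.
 */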
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 so that applications behave correctly
	 * when migrated between CPUs.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

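/*
 * Trap EL0 reads of CTR_EL0 (by clearing SCTLR_EL1.UCT) when this
 * CPU's raw CTR_EL0 differs from the system-wide sanitised value, or
 * when erratum 1542419 requires CTR_EL0 reads to be emulated.
 */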
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
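/*
 * The kernel-side workaround for erratum 1463225 is only applied when
 * the kernel itself is running at EL2 (VHE).
 */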
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

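/*
 * Helpers for building arm64_cpu_capabilities entries from MIDR
 * ranges. Purely as an illustration (the capability and MIDR below are
 * made up, not real kernel symbols), an erratum affecting every
 * revision of one part would be described as:
 *
 *	{
 *		.desc = "Example erratum",
 *		.capability = ARM64_WORKAROUND_EXAMPLE,
 *		ERRATA_MIDR_ALL_VERSIONS(MIDR_EXAMPLE_PART),
 *	},
 *
 * Real entries use capabilities generated from arch/arm64/tools/cpucaps
 * and MIDR values from <asm/cputype.h>.
 */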
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges, sharing the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

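/*
 * ThunderX2 erratum 219 (TVM trapping) only needs the workaround when
 * EL2 is available and at least one CPU reports a non-zero affinity
 * level 0, i.e. the machine is running with SMT enabled.
 */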
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

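/*
 * Neoverse-N1 erratum 1542419: only CPUs advertising CTR_EL0.DIC are
 * affected. The kernel-side workaround is to trap and emulate EL0
 * reads of CTR_EL0 (see cpu_enable_trap_ctr_access()).
 */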
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

static const struct midr_range impdef_pmuv3_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

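/*
 * Match Apple CPUs whose PMU is IMPLEMENTATION DEFINED rather than
 * architected PMUv3. Only relevant when the kernel (and hence KVM)
 * runs at EL2, where guest accesses to the IMP DEF PMU registers need
 * to be trapped.
 */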
static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	unsigned int pmuver;

	if (!is_kernel_in_hyp_mode())
		return false;

	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return false;

	return is_midr_in_range_list(impdef_pmuv3_cpus);
}

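/*
 * HACR_EL2 is an IMPLEMENTATION DEFINED trap control register; bit 56
 * is used here to enable trapping of the IMP DEF PMU registers on the
 * Apple parts matched above.
 */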
static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
static const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

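/*
 * Master table of erratum workarounds. Each entry ties a capability
 * bit to a matcher (usually a MIDR range or range list) and,
 * optionally, a cpu_enable() callback run once the capability has
 * been detected. Entries are only built in when the corresponding
 * CONFIG_* option is enabled.
 */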
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441007, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
		.desc = "Apple IMPDEF PMUv3 Traps",
		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_impdef_pmuv3,
		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
	},
	{
	}
};