Lines Matching +full:4 +full:- +full:cpu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Contains CPU feature definitions
9 * there's a little bit of over-abstraction that tends to obscure what's going
14 * user-visible instructions are available only on a subset of the available
16 * boot CPU and comparing these with the feature registers of each secondary
17 * CPU when bringing them up. If there is a mismatch, then we update the
18 * snapshot state to indicate the lowest common denominator of the feature,
27 * may prevent a CPU from being onlined at all.
31 * - Mismatched features are *always* sanitised to a "safe" value, which
34 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
35 * warning when onlining an offending CPU and the kernel will be tainted
38 * - Features marked as FTR_VISIBLE have their sanitised value visible to
43 * - A "feature" is typically a 4-bit register field. A "capability" is the
44 * high-level description derived from the sanitised field value.
46 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
50 * - KVM exposes its own view of the feature registers to guest operating
57 * - If the arm64_ftr_bits[] for a register has a missing field, then this
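A note on the scheme above: each "feature" is a 4-bit ID register field, and a "capability" is derived by comparing that field against a minimum value. A minimal sketch of that relationship (ftr_field4() and cap_from_field() are made-up names, not helpers from this file):

    /* Hypothetical sketch: extract an unsigned 4-bit feature field... */
    static inline unsigned int ftr_field4(u64 idreg, unsigned int shift)
    {
            return (idreg >> shift) & 0xf;
    }

    /* ...and derive a capability from it. */
    static inline bool cap_from_field(u64 idreg, unsigned int shift,
                                      unsigned int min_val)
    {
            return ftr_field4(idreg, shift) >= min_val;
    }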
63 #define pr_fmt(fmt) "CPU features: " fmt
75 #include <linux/cpu.h>
79 #include <asm/cpu.h>
120 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
127 * seen at least one CPU capable of 32-bit EL0.
132 * Mask of CPUs supporting 32-bit EL0.
139 /* file-wide pr_fmt adds "CPU features: " prefix */ in dump_cpu_features()
180 * sync with the documentation of the CPU feature register ABI.
183 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
184 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
185 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
186 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
187 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
188 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
189 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
190 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
191 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
192 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
193 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
194 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
195 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
196 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
201 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
202 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
203 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
204 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0),
205 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0),
206 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0),
208 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0),
210 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0),
211 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0),
212 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0),
213 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0),
215 FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0),
217 FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0),
218 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0),
223 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
224 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
225 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
226 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
227 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
229 FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
231 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
232 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
233 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
238 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
239 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
240 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
241 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
242 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
243 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
245 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
246 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
247 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
248 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
249 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
250 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
251 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
252 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP),
253 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP),
259 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
260 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
261 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
263 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
264 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
266 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
272 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
274 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0),
276 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0),
278 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0),
280 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
282 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0),
284 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
286 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
288 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0),
290 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0),
298 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
300 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
304 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0),
310 FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
323 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
324 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
325 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
327 * Page size not being supported at Stage-2 is not fatal. You
332 * advertises a given granule size at Stage-2 (value 2) on some
333 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
341 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
342 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
343 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
349 * along with it and treat them as non-strict.
351 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
352 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
353 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
355 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
357 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
358 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
359 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
364 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
369 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
370 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
371 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
372 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
373 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
374 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
375 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
376 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
377 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
378 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
379 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
380 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
381 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
386 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
387 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
388 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
389 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
390 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
391 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
392 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
393 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
394 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
395 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
396 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
397 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
398 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
399 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
400 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
405 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
406 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
414 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
415 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
416 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
418 * Linux can handle differing I-cache policies. Userspace JITs will
420 * If we have differing I-cache policies, report it as the weakest - VIPT.
423 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
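The *minLine fields of CTR_EL0 encode log2 of the line size in words (a word being 4 bytes). A hedged usage sketch of turning IminLine into a byte count (sketch_icache_line_bytes() is a made-up name; the shift macro is the one referenced above):

    /* Sketch: I-cache line size in bytes from CTR_EL0.IminLine. */
    static inline unsigned long sketch_icache_line_bytes(u64 ctr)
    {
            return 4UL << ((ctr >> CTR_EL0_IminLine_SHIFT) & 0xf);
    }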
436 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf),
437 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0),
438 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0),
439 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0),
440 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0),
441 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf),
442 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0),
443 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0),
448 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
449 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
450 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
451 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
452 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
457 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
458 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
463 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0),
464 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0),
465 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0),
466 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0),
467 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0),
468 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0),
469 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0),
470 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0),
475 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
476 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
477 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
478 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
479 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
480 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
481 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0),
482 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0),
487 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0),
488 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0),
494 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0),
499 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0),
504 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0),
505 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0),
506 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0),
507 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0),
508 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0),
509 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0),
510 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0),
515 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0),
516 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0),
517 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0),
518 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA1_SHIFT, 4, 0),
519 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0),
520 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0),
525 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0),
526 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0),
527 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0),
528 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0),
529 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0),
530 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0),
531 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0),
539 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0),
544 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0),
545 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0),
546 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0),
547 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0),
548 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0),
549 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0),
550 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0),
551 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0),
556 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0),
561 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
562 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
563 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
564 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
565 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
566 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
567 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
572 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0),
573 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0),
574 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0),
575 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0),
576 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0),
577 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT, 4, 0),
582 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0),
583 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0),
584 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0),
585 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0),
586 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0),
587 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0),
588 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0),
589 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0),
594 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
595 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
601 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
602 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0),
603 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0),
604 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0),
605 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0),
606 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0),
607 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0),
612 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0),
618 * attributes, with 4-bit feature fields and a default safe value of
620 * id_isar[1-3], id_mmfr[1-3]
623 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
624 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
625 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
626 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
627 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
628 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
629 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
630 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
700 /* Op1 = 0, CRn = 0, CRm = 4 */
741 return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id; in search_cmp_ftr_reg()
745 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
750 * returns - Upon success, matching ftr_reg entry for id.
751 * - NULL on failure. It is up to the caller to decide
764 return ret->reg; in get_arm64_ftr_reg_nowarn()
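The lookup works because arm64_ftr_regs[] is kept sorted by sys_id (sort_ftr_regs() below verifies this), so search_cmp_ftr_reg() above can drive a bsearch(). Roughly, as a sketch of the surrounding code rather than a verbatim copy:

    static struct arm64_ftr_reg *lookup_sketch(u32 sys_id)
    {
            const struct __ftr_reg_entry *e;

            e = bsearch((const void *)(unsigned long)sys_id,
                        arm64_ftr_regs, ARRAY_SIZE(arm64_ftr_regs),
                        sizeof(arm64_ftr_regs[0]), search_cmp_ftr_reg);
            return e ? e->reg : NULL;
    }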
769 * get_arm64_ftr_reg - Looks up a feature register entry using
772 * returns - Upon success, matching ftr_reg entry for id.
773 * - NULL on failure but with a WARN_ON().
782 * Requesting a non-existent register search is an error. Warn in get_arm64_ftr_reg()
795 reg |= (ftr_val << ftrp->shift) & mask; in arm64_ftr_set_value()
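The line above relies on a field mask built from the entry's shift and width (arm64_ftr_mask() in the real code); reconstructed from the fields used here, it amounts to:

    /* Sketch: mask covering one arm64_ftr_bits field. */
    static inline u64 ftr_mask_sketch(const struct arm64_ftr_bits *ftrp)
    {
            return GENMASK_ULL(ftrp->shift + ftrp->width - 1, ftrp->shift);
    }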
804 switch (ftrp->type) { in arm64_ftr_safe_value()
806 ret = ftrp->safe_val; in arm64_ftr_safe_value()
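The remaining cases of this switch implement the FTR_* policies used throughout the tables above. A condensed sketch (signed/unsigned handling elided; safe_value_sketch() is a made-up name):

    static s64 safe_value_sketch(const struct arm64_ftr_bits *ftrp,
                                 s64 new, s64 cur)
    {
            switch (ftrp->type) {
            case FTR_EXACT:                 /* mismatch -> fixed safe value */
                    return ftrp->safe_val;
            case FTR_LOWER_SAFE:            /* lowest common denominator */
                    return new < cur ? new : cur;
            case FTR_HIGHER_SAFE:           /* e.g. SpecSEI above */
                    return new > cur ? new : cur;
            case FTR_HIGHER_OR_ZERO_SAFE:   /* e.g. CWG/ERG above */
                    return (!new || !cur) ? 0 : (new > cur ? new : cur);
            default:
                    BUG();
            }
    }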
831 const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits; in sort_ftr_regs()
838 for (; ftr_bits->width != 0; ftr_bits++, j++) { in sort_ftr_regs()
839 unsigned int width = ftr_reg->ftr_bits[j].width; in sort_ftr_regs()
840 unsigned int shift = ftr_reg->ftr_bits[j].shift; in sort_ftr_regs()
845 ftr_reg->name, shift); in sort_ftr_regs()
854 prev_shift = ftr_reg->ftr_bits[j - 1].shift; in sort_ftr_regs()
857 ftr_reg->name, shift); in sort_ftr_regs()
871 BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id); in sort_ftr_regs()
876 * Initialise the CPU feature register from Boot CPU values.
879 * RES0 for the system-wide value, and must strictly match.
894 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { in init_cpu_ftr_reg()
897 s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val); in init_cpu_ftr_reg()
899 if ((ftr_mask & reg->override->mask) == ftr_mask) { in init_cpu_ftr_reg()
905 reg->override->mask &= ~ftr_mask; in init_cpu_ftr_reg()
906 reg->override->val &= ~ftr_mask; in init_cpu_ftr_reg()
920 reg->name, in init_cpu_ftr_reg()
921 ftrp->shift + ftrp->width - 1, in init_cpu_ftr_reg()
922 ftrp->shift, str, tmp); in init_cpu_ftr_reg()
923 } else if ((ftr_mask & reg->override->val) == ftr_mask) { in init_cpu_ftr_reg()
924 reg->override->val &= ~ftr_mask; in init_cpu_ftr_reg()
926 reg->name, in init_cpu_ftr_reg()
927 ftrp->shift + ftrp->width - 1, in init_cpu_ftr_reg()
928 ftrp->shift); in init_cpu_ftr_reg()
934 if (!ftrp->strict) in init_cpu_ftr_reg()
936 if (ftrp->visible) in init_cpu_ftr_reg()
939 reg->user_val = arm64_ftr_set_value(ftrp, in init_cpu_ftr_reg()
940 reg->user_val, in init_cpu_ftr_reg()
941 ftrp->safe_val); in init_cpu_ftr_reg()
946 reg->sys_val = val; in init_cpu_ftr_reg()
947 reg->strict_mask = strict_mask; in init_cpu_ftr_reg()
948 reg->user_mask = user_mask; in init_cpu_ftr_reg()
957 for (; caps->matches; caps++) { in init_cpucap_indirect_list_from_array()
958 if (WARN(caps->capability >= ARM64_NCAPS, in init_cpucap_indirect_list_from_array()
959 "Invalid capability %d\n", caps->capability)) in init_cpucap_indirect_list_from_array()
961 if (WARN(cpucap_ptrs[caps->capability], in init_cpucap_indirect_list_from_array()
963 caps->capability)) in init_cpucap_indirect_list_from_array()
965 cpucap_ptrs[caps->capability] = caps; in init_cpucap_indirect_list_from_array()
979 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); in init_32bit_cpu_features()
980 init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); in init_32bit_cpu_features()
981 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); in init_32bit_cpu_features()
982 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); in init_32bit_cpu_features()
983 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); in init_32bit_cpu_features()
984 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); in init_32bit_cpu_features()
985 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); in init_32bit_cpu_features()
986 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); in init_32bit_cpu_features()
987 init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); in init_32bit_cpu_features()
988 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); in init_32bit_cpu_features()
989 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); in init_32bit_cpu_features()
990 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); in init_32bit_cpu_features()
991 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); in init_32bit_cpu_features()
992 init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); in init_32bit_cpu_features()
993 init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); in init_32bit_cpu_features()
994 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); in init_32bit_cpu_features()
995 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); in init_32bit_cpu_features()
996 init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); in init_32bit_cpu_features()
997 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); in init_32bit_cpu_features()
998 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); in init_32bit_cpu_features()
999 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); in init_32bit_cpu_features()
1022 np = of_find_compatible_node(NULL, NULL, "arm,gic-v3"); in detect_system_supports_pseudo_nmi()
1023 if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) { in detect_system_supports_pseudo_nmi()
1024 pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n"); in detect_system_supports_pseudo_nmi()
1038 init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); in init_cpu_features()
1039 init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); in init_cpu_features()
1040 init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); in init_cpu_features()
1041 init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); in init_cpu_features()
1042 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); in init_cpu_features()
1043 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); in init_cpu_features()
1044 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); in init_cpu_features()
1045 init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); in init_cpu_features()
1046 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); in init_cpu_features()
1047 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); in init_cpu_features()
1048 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); in init_cpu_features()
1049 init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3); in init_cpu_features()
1050 init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); in init_cpu_features()
1051 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); in init_cpu_features()
1052 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); in init_cpu_features()
1053 init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0); in init_cpu_features()
1055 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) in init_cpu_features()
1056 init_32bit_cpu_features(&info->aarch32); in init_cpu_features()
1076 info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS; in init_cpu_features()
1082 if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) in init_cpu_features()
1083 init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); in init_cpu_features()
1090 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { in update_cpu_ftr_reg()
1091 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); in update_cpu_ftr_reg()
1098 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new); in update_cpu_ftr_reg()
1103 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot) in check_update_ftr_reg() argument
1111 if ((boot & regp->strict_mask) == (val & regp->strict_mask)) in check_update_ftr_reg()
1113 pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n", in check_update_ftr_reg()
1114 regp->name, boot, cpu, val); in check_update_ftr_reg()
1126 for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) { in relax_cpu_ftr_reg()
1127 if (ftrp->shift == field) { in relax_cpu_ftr_reg()
1128 regp->strict_mask &= ~arm64_ftr_mask(ftrp); in relax_cpu_ftr_reg()
1134 WARN_ON(!ftrp->width); in relax_cpu_ftr_reg()
1145 if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0)) in lazy_init_32bit_cpu_features()
1148 boot->aarch32 = info->aarch32; in lazy_init_32bit_cpu_features()
1149 init_32bit_cpu_features(&boot->aarch32); in lazy_init_32bit_cpu_features()
1153 static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info, in update_32bit_cpu_features() argument
1161 * EL1-dependent register fields to avoid spurious sanity check fails. in update_32bit_cpu_features()
1172 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, in update_32bit_cpu_features()
1173 info->reg_id_dfr0, boot->reg_id_dfr0); in update_32bit_cpu_features()
1174 taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu, in update_32bit_cpu_features()
1175 info->reg_id_dfr1, boot->reg_id_dfr1); in update_32bit_cpu_features()
1176 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, in update_32bit_cpu_features()
1177 info->reg_id_isar0, boot->reg_id_isar0); in update_32bit_cpu_features()
1178 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, in update_32bit_cpu_features()
1179 info->reg_id_isar1, boot->reg_id_isar1); in update_32bit_cpu_features()
1180 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, in update_32bit_cpu_features()
1181 info->reg_id_isar2, boot->reg_id_isar2); in update_32bit_cpu_features()
1182 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, in update_32bit_cpu_features()
1183 info->reg_id_isar3, boot->reg_id_isar3); in update_32bit_cpu_features()
1184 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, in update_32bit_cpu_features()
1185 info->reg_id_isar4, boot->reg_id_isar4); in update_32bit_cpu_features()
1186 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, in update_32bit_cpu_features()
1187 info->reg_id_isar5, boot->reg_id_isar5); in update_32bit_cpu_features()
1188 taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu, in update_32bit_cpu_features()
1189 info->reg_id_isar6, boot->reg_id_isar6); in update_32bit_cpu_features()
1196 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, in update_32bit_cpu_features()
1197 info->reg_id_mmfr0, boot->reg_id_mmfr0); in update_32bit_cpu_features()
1198 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, in update_32bit_cpu_features()
1199 info->reg_id_mmfr1, boot->reg_id_mmfr1); in update_32bit_cpu_features()
1200 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, in update_32bit_cpu_features()
1201 info->reg_id_mmfr2, boot->reg_id_mmfr2); in update_32bit_cpu_features()
1202 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, in update_32bit_cpu_features()
1203 info->reg_id_mmfr3, boot->reg_id_mmfr3); in update_32bit_cpu_features()
1204 taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu, in update_32bit_cpu_features()
1205 info->reg_id_mmfr4, boot->reg_id_mmfr4); in update_32bit_cpu_features()
1206 taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu, in update_32bit_cpu_features()
1207 info->reg_id_mmfr5, boot->reg_id_mmfr5); in update_32bit_cpu_features()
1208 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, in update_32bit_cpu_features()
1209 info->reg_id_pfr0, boot->reg_id_pfr0); in update_32bit_cpu_features()
1210 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, in update_32bit_cpu_features()
1211 info->reg_id_pfr1, boot->reg_id_pfr1); in update_32bit_cpu_features()
1212 taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu, in update_32bit_cpu_features()
1213 info->reg_id_pfr2, boot->reg_id_pfr2); in update_32bit_cpu_features()
1214 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, in update_32bit_cpu_features()
1215 info->reg_mvfr0, boot->reg_mvfr0); in update_32bit_cpu_features()
1216 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, in update_32bit_cpu_features()
1217 info->reg_mvfr1, boot->reg_mvfr1); in update_32bit_cpu_features()
1218 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, in update_32bit_cpu_features()
1219 info->reg_mvfr2, boot->reg_mvfr2); in update_32bit_cpu_features()
1225 * Update system wide CPU feature registers with the values from a
1226 * non-boot CPU. Also performs SANITY checks to make sure that there
1227 * aren't any insane variations from that of the boot CPU.
1229 void update_cpu_features(int cpu, in update_cpu_features() argument
1236 * The kernel can handle differing I-cache policies, but otherwise in update_cpu_features()
1240 taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu, in update_cpu_features()
1241 info->reg_ctr, boot->reg_ctr); in update_cpu_features()
1248 taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu, in update_cpu_features()
1249 info->reg_dczid, boot->reg_dczid); in update_cpu_features()
1252 taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu, in update_cpu_features()
1253 info->reg_cntfrq, boot->reg_cntfrq); in update_cpu_features()
1256 * The kernel uses self-hosted debug features and expects CPUs to in update_cpu_features()
1261 taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu, in update_cpu_features()
1262 info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); in update_cpu_features()
1263 taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu, in update_cpu_features()
1264 info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); in update_cpu_features()
1266 * Even in big.LITTLE, processors should be identical instruction-set in update_cpu_features()
1269 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu, in update_cpu_features()
1270 info->reg_id_aa64isar0, boot->reg_id_aa64isar0); in update_cpu_features()
1271 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, in update_cpu_features()
1272 info->reg_id_aa64isar1, boot->reg_id_aa64isar1); in update_cpu_features()
1273 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, in update_cpu_features()
1274 info->reg_id_aa64isar2, boot->reg_id_aa64isar2); in update_cpu_features()
1281 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu, in update_cpu_features()
1282 info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); in update_cpu_features()
1283 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, in update_cpu_features()
1284 info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); in update_cpu_features()
1285 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, in update_cpu_features()
1286 info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); in update_cpu_features()
1287 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu, in update_cpu_features()
1288 info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3); in update_cpu_features()
1290 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, in update_cpu_features()
1291 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); in update_cpu_features()
1292 taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, in update_cpu_features()
1293 info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); in update_cpu_features()
1295 taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, in update_cpu_features()
1296 info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); in update_cpu_features()
1298 taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu, in update_cpu_features()
1299 info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0); in update_cpu_features()
1322 info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS; in update_cpu_features()
1337 id_aa64pfr1_mte(info->reg_id_aa64pfr1)) { in update_cpu_features()
1338 taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu, in update_cpu_features()
1339 info->reg_gmid, boot->reg_gmid); in update_cpu_features()
1350 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { in update_cpu_features()
1352 taint |= update_32bit_cpu_features(cpu, &info->aarch32, in update_cpu_features()
1353 &boot->aarch32); in update_cpu_features()
1357 * Mismatched CPU features are a recipe for disaster. Don't even in update_cpu_features()
1361 pr_warn_once("Unsupported CPU feature variation detected.\n"); in update_cpu_features()
1372 return regp->sys_val; in read_sanitised_ftr_reg()
1380 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
1381 * Read the system register on the current CPU
1436 val &= ~regp->override->mask; in __read_sysreg_by_encoding()
1437 val |= (regp->override->val & regp->override->mask); in __read_sysreg_by_encoding()
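The override applied above comes from the early idreg-override parsing: mask selects the bits being overridden and val supplies their replacement. A hypothetical override forcing one 4-bit field to zero (bits [35:32] chosen arbitrarily for illustration) would look like:

    struct arm64_ftr_override example_ovr = {
            .val  = 0,                      /* replacement bits       */
            .mask = GENMASK_ULL(35, 32),    /* field being overridden */
    };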
1443 #include <linux/irqchip/arm-gic-v3.h>
1454 int val = cpuid_feature_extract_field_width(reg, entry->field_pos, in feature_matches()
1455 entry->field_width, in feature_matches()
1456 entry->sign); in feature_matches()
1458 return val >= entry->min_field_value; in feature_matches()
1466 return read_sanitised_ftr_reg(entry->sys_reg); in read_scoped_sysreg()
1468 return __read_sysreg_by_encoding(entry->sys_reg); in read_scoped_sysreg()
1478 regp = get_arm64_ftr_reg(entry->sys_reg); in has_user_cpuid_feature()
1482 mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask, in has_user_cpuid_feature()
1483 entry->field_pos, in has_user_cpuid_feature()
1484 entry->field_width); in has_user_cpuid_feature()
1548 pr_info("detected: 32-bit EL0 Support\n"); in has_32bit_el0()
1563 entry->desc); in has_useable_gicv3_cpuif()
1584 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively in cpu_emulate_effective_ctr()
1586 * to the CTR_EL0 on this CPU and emulate it with the real/safe in cpu_emulate_effective_ctr()
1610 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP in has_useable_cnp()
1611 * may share TLB entries with a CPU stuck in the crashed in has_useable_cnp()
1625 * is initialised. Checking the status on the local CPU allows the boot
1626 * CPU to detect the need for non-global mappings and thus avoiding a
1627 * pagetable re-write after all the CPUs are booted. This check will be
1629 * state once the SMP CPUs are up and thus make the switch to non-global
1693 /* Defer to CPU feature registers */ in unmap_kernel_at_el0()
1702 * ThunderX leads to apparent I-cache corruption of kernel text, which in unmap_kernel_at_el0()
1704 * on the cpus_have_*cap() helpers here to detect the CPU erratum in unmap_kernel_at_el0()
1711 __kpti_forced = -1; in unmap_kernel_at_el0()
1724 __kpti_forced = -1; in unmap_kernel_at_el0()
1776 #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
1787 kpti_ng_temp_alloc -= PAGE_SIZE; in kpti_ng_pgd_alloc()
1797 int cpu = smp_processor_id(); in __kpti_install_ng_mappings() local
1806 if (!cpu) { in __kpti_install_ng_mappings()
1808 kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); in __kpti_install_ng_mappings()
1817 // +--------+-/-------+-/------ +-\\--------+ in __kpti_install_ng_mappings()
1819 // +--------+-\-------+-\------ +-//--------+ in __kpti_install_ng_mappings()
1825 // to be used as an ad-hoc fixmap. in __kpti_install_ng_mappings()
1833 remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA); in __kpti_install_ng_mappings()
1836 if (!cpu) { in __kpti_install_ng_mappings()
1851 * We don't need to rewrite the page-tables if either we've done in kpti_install_ng_mappings()
1885 __kpti_forced = enabled ? 1 : -1; in parse_kpti()
1938 * DBM is a non-conflicting feature. i.e, the kernel can safely in has_hw_dbm()
1940 * unconditionally enable the capability to allow any late CPU in has_hw_dbm()
1942 * CPU, if it is supported. in has_hw_dbm()
1953 * The "amu_cpus" cpumask only signals that the CPU implementation for the
1955 * information regarding all the events that it supports. When a CPU bit is
1957 * of the 4 fixed counters for that CPU. But this does not guarantee that the
1963 bool cpu_has_amu_feat(int cpu) in cpu_has_amu_feat() argument
1965 return cpumask_test_cpu(cpu, &amu_cpus); in cpu_has_amu_feat()
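A typical (sketched) use is to gate reads of the fixed AMU counters on this per-CPU mask, e.g. for frequency invariance; the counter name below is the one defined in <asm/sysreg.h>, and the surrounding context is assumed:

    /* Usage sketch: only touch AMU counters where the feature was seen. */
    if (cpu_has_amu_feat(smp_processor_id()))
            core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);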
1988 * The AMU extension is a non-conflicting feature: the kernel can in has_amu()
1991 * the capability to allow any late CPU to use the feature. in has_amu()
1996 * present on that respective CPU. The enable function will also in has_amu()
2021 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to in cpu_copy_el2regs()
2035 pr_warn("unavailable: %s\n", cap->desc); in has_nested_virt_support()
2085 * The ptr-auth feature levels are not intercompatible with lower in has_address_auth_cpucap()
2086 * levels. Hence we must match ptr-auth feature level of the secondary in has_address_auth_cpucap()
2087 * CPUs with that of the boot CPU. The level of boot cpu is fetched in has_address_auth_cpucap()
2091 * boot CPU as a mismatched secondary CPU is parked before it gets in has_address_auth_cpucap()
2094 boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), in has_address_auth_cpucap()
2095 entry->field_pos, entry->sign); in has_address_auth_cpucap()
2097 return boot_val >= entry->min_field_value; in has_address_auth_cpucap()
2099 sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), in has_address_auth_cpucap()
2100 entry->field_pos, entry->sign); in has_address_auth_cpucap()
2101 return (sec_val >= entry->min_field_value) && (sec_val == boot_val); in has_address_auth_cpucap()
2138 * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU in can_use_gic_priorities()
2157 * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU in has_gic_prio_relaxed_sync()
2181 * Use of X16/X17 for tail-calls and trampolines that jump to in bti_enable()
2219 regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK; in user_feature_fixup()
2253 /* Internal helper functions to match cpu capability type */
2257 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); in cpucap_late_cpu_optional()
2263 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); in cpucap_late_cpu_permitted()
2269 return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); in cpucap_panic_on_conflict()
2284 .desc = "GIC system register CPU interface",
2354 .desc = "32-bit EL1 Support",
2382 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
2455 .desc = "Stage-2 Force Write-Back",
2462 .desc = "ARMv8.4 Translation Table Level",
2639 .desc = "RCpc load-acquire (LDAPR)",
2717 .desc = "Stage-1 Permission Indirection Extension (S1PIE)",
2737 .desc = "52-bit Virtual Addressing for KVM (LPA2)",
2900 * check is future proof, by making sure value is non-zero. in compat_has_neon()
2942 switch (cap->hwcap_type) { in cap_set_elf_hwcap()
2944 cpu_set_feature(cap->hwcap); in cap_set_elf_hwcap()
2948 compat_elf_hwcap |= (u32)cap->hwcap; in cap_set_elf_hwcap()
2951 compat_elf_hwcap2 |= (u32)cap->hwcap; in cap_set_elf_hwcap()
2965 switch (cap->hwcap_type) { in cpus_have_elf_hwcap()
2967 rc = cpu_have_feature(cap->hwcap); in cpus_have_elf_hwcap()
2971 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; in cpus_have_elf_hwcap()
2974 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; in cpus_have_elf_hwcap()
2987 /* We support emulation of accesses to CPU ID feature registers */ in setup_elf_hwcaps()
2989 for (; hwcaps->matches; hwcaps++) in setup_elf_hwcaps()
2990 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) in setup_elf_hwcaps()
3002 if (!caps || !(caps->type & scope_mask) || in update_cpu_capabilities()
3003 cpus_have_cap(caps->capability) || in update_cpu_capabilities()
3004 !caps->matches(caps, cpucap_default_scope(caps))) in update_cpu_capabilities()
3007 if (caps->desc && !caps->cpus) in update_cpu_capabilities()
3008 pr_info("detected: %s\n", caps->desc); in update_cpu_capabilities()
3010 __set_bit(caps->capability, system_cpucaps); in update_cpu_capabilities()
3012 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) in update_cpu_capabilities()
3013 set_bit(caps->capability, boot_cpucaps); in update_cpu_capabilities()
3018 * Enable all the available capabilities on this CPU. The capabilities
3032 if (!(cap->type & non_boot_scope)) in cpu_enable_non_boot_scope_capabilities()
3035 if (cap->cpu_enable) in cpu_enable_non_boot_scope_capabilities()
3036 cap->cpu_enable(cap); in cpu_enable_non_boot_scope_capabilities()
3058 if (!caps || !(caps->type & scope_mask)) in enable_cpu_capabilities()
3060 num = caps->capability; in enable_cpu_capabilities()
3064 if (boot_scope && caps->cpu_enable) in enable_cpu_capabilities()
3067 * before any secondary CPU boots. Thus, each secondary in enable_cpu_capabilities()
3070 * the boot CPU, for which the capability must be in enable_cpu_capabilities()
3074 caps->cpu_enable(caps); in enable_cpu_capabilities()
3078 * For all non-boot scope capabilities, use stop_machine() in enable_cpu_capabilities()
3091 * action on this CPU.
3103 if (!caps || !(caps->type & scope_mask)) in verify_local_cpu_caps()
3106 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); in verify_local_cpu_caps()
3107 system_has_cap = cpus_have_cap(caps->capability); in verify_local_cpu_caps()
3111 * Check if the new CPU misses an advertised feature, in verify_local_cpu_caps()
3118 * whether the CPU has it or not, as it is enabled in verify_local_cpu_caps()
3120 * appropriate action on this CPU. in verify_local_cpu_caps()
3122 if (caps->cpu_enable) in verify_local_cpu_caps()
3123 caps->cpu_enable(caps); in verify_local_cpu_caps()
3126 * Check if the CPU has this capability if it isn't in verify_local_cpu_caps()
3135 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", in verify_local_cpu_caps()
3136 smp_processor_id(), caps->capability, in verify_local_cpu_caps()
3137 caps->desc, system_has_cap, cpu_has_cap); in verify_local_cpu_caps()
3147 * Check for CPU features that are used in early boot
3148 * based on the Boot CPU value.
3161 for (; caps->matches; caps++) in __verify_local_elf_hwcaps()
3162 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) { in __verify_local_elf_hwcaps()
3163 pr_crit("CPU%d: missing HWCAP: %s\n", in __verify_local_elf_hwcaps()
3164 smp_processor_id(), caps->desc); in __verify_local_elf_hwcaps()
3182 pr_crit("CPU%d: SVE: vector length support mismatch\n", in verify_sve_features()
3195 pr_crit("CPU%d: SME: vector length support mismatch\n", in verify_sme_features()
3220 pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id()); in verify_hyp_capabilities()
3229 pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); in verify_hyp_capabilities()
3235 * Run through the enabled system capabilities and enable() it on this CPU.
3237 * Any new CPU should match the system wide status of the capability. If the
3238 * new CPU doesn't have a capability which the system now has enabled, we
3240 * we park the CPU.
3265 * All secondary CPUs should conform to the early CPU features in check_local_cpu_capabilities()
3266 * in use by the kernel based on boot CPU. in check_local_cpu_capabilities()
3271 * If we haven't finalised the system capabilities, this CPU gets in check_local_cpu_capabilities()
3273 * Otherwise, this CPU should verify that it has all the system in check_local_cpu_capabilities()
3288 return cap->matches(cap, SCOPE_LOCAL_CPU); in this_cpu_has_cap()
3297 * - The system wide safe registers are set with all the SMP CPUs and,
3298 * - The SYSTEM_FEATURE system_cpucaps may not have been set.
3306 return cap->matches(cap, SCOPE_SYSTEM); in __system_matches_cap()
3340 * The boot CPU's feature register values have been recorded. Detect in setup_boot_cpu_capabilities()
3341 * boot cpucaps and local cpucaps for the boot CPU, then enable and in setup_boot_cpu_capabilities()
3352 * Initialize the indirect array of CPU capabilities pointers before we in setup_boot_cpu_features()
3353 * handle the boot CPU. in setup_boot_cpu_features()
3358 * Detect broken pseudo-NMI. Must be called _before_ the call to in setup_boot_cpu_features()
3370 * The system-wide safe feature register values have been finalized. in setup_system_capabilities()
3385 if (caps && caps->cpus && caps->desc && in setup_system_capabilities()
3386 cpumask_any(caps->cpus) < nr_cpu_ids) in setup_system_capabilities()
3387 pr_info("detected: %s on CPU%*pbl\n", in setup_system_capabilities()
3388 caps->desc, cpumask_pr_args(caps->cpus)); in setup_system_capabilities()
3429 static int enable_mismatched_32bit_el0(unsigned int cpu) in enable_mismatched_32bit_el0() argument
3432 * The first 32-bit-capable CPU we detected and so can no longer in enable_mismatched_32bit_el0()
3433 * be offlined by userspace. -1 indicates we haven't yet onlined in enable_mismatched_32bit_el0()
3434 * a 32-bit-capable CPU. in enable_mismatched_32bit_el0()
3436 static int lucky_winner = -1; in enable_mismatched_32bit_el0()
3438 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); in enable_mismatched_32bit_el0()
3439 bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0); in enable_mismatched_32bit_el0()
3442 cpumask_set_cpu(cpu, cpu_32bit_el0_mask); in enable_mismatched_32bit_el0()
3454 * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting in enable_mismatched_32bit_el0()
3455 * every CPU in the system for a 32-bit task. in enable_mismatched_32bit_el0()
3457 lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask, in enable_mismatched_32bit_el0()
3459 get_cpu_device(lucky_winner)->offline_disabled = true; in enable_mismatched_32bit_el0()
3462 pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n", in enable_mismatched_32bit_el0()
3463 cpu, lucky_winner); in enable_mismatched_32bit_el0()
3473 return -ENOMEM; in init_32bit_el0_mask()
3488 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7]
3489 * See Table C5-6 System instruction encodings for System register accesses,
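That encoding range translates to a check on the Op0/Op1/CRn/CRm fields of the instruction encoding. A sketch using the standard extractors from <asm/sysreg.h> (is_emulated_sketch() is a stand-in name):

    static bool is_emulated_sketch(u32 id)
    {
            return sys_reg_Op0(id) == 0x3 && sys_reg_Op1(id) == 0x0 &&
                   sys_reg_CRn(id) == 0x0 &&
                   (sys_reg_CRm(id) == 0 ||
                    (sys_reg_CRm(id) >= 2 && sys_reg_CRm(id) <= 7));
    }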
3519 return -EINVAL; in emulate_id_reg()
3530 return -EINVAL; in emulate_sys_reg()