xref: /qemu/target/arm/tcg/hflags.c (revision 08b462dd9970a88d7f0e7c61ca48502463b0b78d)
/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

/* Return true if memory alignment should be enforced. */
static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
{
#ifdef CONFIG_USER_ONLY
    return false;
#else
    /* Check the alignment enable bit. */
    if (sctlr & SCTLR_A) {
        return true;
    }

    /*
     * With PMSA, when the MPU is disabled, all memory types in the
     * default map are Normal, so we don't need to enforce alignment.
     */
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        return false;
    }

    /*
     * With VMSA, if translation is disabled, then the default memory type
     * is Device(-nGnRnE) instead of Normal, which requires that alignment
     * be enforced.  Since this affects all ram, it is most efficient
     * to handle this during translation.
     */
    if (sctlr & SCTLR_M) {
        /* Translation enabled: memory type in PTE via MAIR_ELx. */
        return false;
    }
    if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) {
        /* Stage 2 translation enabled: memory type in PTE. */
        return false;
    }
    return true;
#endif
}

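/*
 * Return true if cpreg accesses should use the Secure-bank copies of
 * the registers: i.e. EL3 exists, EL3 is AArch32, and SCR.NS is 0.
 */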
bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

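/* Set the TB flag bits that are common to all execution states. */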
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

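/* Set the TB flag bits that are common to A-profile and M-profile AArch32. */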
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

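/* Rebuild the TB flags for M-profile CPUs. */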
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

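/* Rebuild the TB flags for A-profile AArch32. */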
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);
    uint64_t sctlr = arm_sctlr(env, el);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

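/* Rebuild the TB flags for AArch64. */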
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE is disabled, the translator does not need
         * the vector length.  If SVE EL > FP EL, the FP exception has
         * precedence and the translator does not need the SVE EL.  Save
         * potential re-translations by forcing the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */
            if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    /*
     * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care
     * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present.
     */
    if (el == 1 && (hcr & HCR_NV)) {
        DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        DP_TBFLAG_A64(flags, NV, 1);
        if (hcr & HCR_NV1) {
            DP_TBFLAG_A64(flags, NV1, 1);
        }
        if (hcr & HCR_NV2) {
            DP_TBFLAG_A64(flags, NV2, 1);
            if (hcr & HCR_E2H) {
                DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
            }
            if (env->cp15.sctlr_el[2] & SCTLR_EE) {
                DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
            }
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (eg EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    if (env->vfp.fpcr & FPCR_AH) {
        DP_TBFLAG_A64(flags, AH, 1);
    }
    if (env->vfp.fpcr & FPCR_NEP) {
        /*
         * In streaming-SVE without FA64, NEP behaves as if zero;
         * compare pseudocode IsMerging()
         */
        if (!(EX_TBFLAG_A64(flags, PSTATE_SM) && !sme_fa64(env, el))) {
            DP_TBFLAG_A64(flags, NEP, 1);
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

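/* Recompute all cached TB flags for the CPU's current execution state. */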
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

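/*
 * Rebuild and cache the TB flags; callers do this after changing any
 * CPU state that the cached flags are derived from.
 */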
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

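/* As above, but the translator already knows the new EL and passes it in. */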
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

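/* AArch32 A-profile variant where the translator passes in the new EL. */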
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

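/* AArch64 variant where the translator passes in the new EL. */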
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

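/*
 * Debug-only consistency check (CONFIG_DEBUG_TCG): verify that the cached
 * hflags still match a freshly rebuilt set, and abort if they do not.
 */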
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}