/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"


/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (template_syn & ARM_EL_VNCR) {
        /*
         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
         * they are always reported as "same EL", even though we are going
         * from EL1 to EL2.
         */
        assert(!fi->stage2);
        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
        || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

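/*
 * Editorial sketch (not part of the original file, compiled out): a
 * minimal check of the no-ISV path above. With ARM_EL_ISV clear in the
 * template syndrome, the merged syndrome must leave ISV clear too,
 * whatever the runtime fault details are. The guard macro is
 * hypothetical.
 */
#ifdef TLB_HELPER_SYNDROME_SKETCH
static void sketch_merge_syn_no_isv(void)
{
    /* A stage-2 data abort routed to EL2, but with no ISV in the template. */
    ARMMMUFaultInfo fi = { .stage2 = true };
    uint32_t syn = merge_syn_data_abort(0, &fi, 2, false, true, 0x21);
    g_assert(!(syn & ARM_EL_ISV));
}
#endif
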
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register: bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}

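/*
 * Editorial sketch (not part of the original file, compiled out): the
 * LPAE branch above relies on the bottom six FSR bits carrying the
 * status code in the same form the syndrome wants. For instance, a
 * level-2 translation fault has LFSC 0b000110, so extract32() returns
 * it unchanged. The guard macro is hypothetical.
 */
#ifdef TLB_HELPER_FSR_SKETCH
static void sketch_lfsc_extract(void)
{
    uint32_t fsr = 0b000110;               /* translation fault, level 2 */
    g_assert(extract32(fsr, 0, 6) == 0b000110);
}
#endif
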
static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}

static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk]        = 0b000100,
        [GPCF_Fail]        = 0b001100,
        [GPCF_EABT]        = 0b010100,
    };

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}

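/*
 * Editorial sketch (not part of the original file, compiled out): the
 * GPCSC encoding above places the fault class in the upper bits and
 * ORs in the walk level, so GPCF_Walk at level 1 encodes as 0b000101.
 * The guard macro is hypothetical.
 */
#ifdef TLB_HELPER_GPCSC_SKETCH
static void sketch_encode_gpcsc(void)
{
    ARMMMUFaultInfo fi = { .gpcf = GPCF_Walk, .level = 1 };
    g_assert(encode_gpcsc(&fi) == 0b000101);
}
#endif
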
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    /*
     * We know this must be a data or insn abort, and that
     * env->exception.syndrome contains the template syndrome set
     * up at translate time. So we can check only the VNCR bit
     * (and indeed the syndrome does not have the EC field in it,
     * because we masked that out in disas_set_insn_syndrome()).
     */
    bool is_vncr = (access_type != MMU_INST_FETCH) &&
        (env->exception.syndrome & ARM_EL_VNCR);

    if (is_vncr) {
        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
        target_el = 2;
    }

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), is_vncr,
                      0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, a GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

 do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

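/*
 * Editorial sketch (not part of the original file, compiled out): the
 * stage-2 branch above reports the faulting IPA in HPFAR_EL2 by
 * dropping the 12-bit page offset and placing the remaining address
 * bits at register bit 4 (the FIPA field). The guard macro is
 * hypothetical.
 */
#ifdef TLB_HELPER_HPFAR_SKETCH
static void sketch_hpfar_fipa(void)
{
    uint64_t s2addr = 0x12345678ull;
    uint64_t hpfar = extract64(s2addr, 12, 47) << 4;
    /* Equivalent here because s2addr has no bits set above bit 58. */
    g_assert(hpfar == (s2addr >> 12) << 4);
}
#endif
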
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = arm_env_mmu_index(env);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since the syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Per R_XCHFJ, an alignment fault not due to memory type has
     * highest precedence.  Otherwise, walk the page table and
     * collect the page description.
     */
    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
        fi->type = ARMFault_Alignment;
    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                              &res, fi)) {
        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;
        *out = res.f;
        return true;
    }
    if (probe) {
        return false;
    }

    /* Now we have a real cpu fault. */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
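
/*
 * Editorial sketch (not part of the original file, compiled out): the
 * R_XCHFJ alignment check above derives a mask from the MemOp. With
 * natural alignment for a 4-byte access the mask is 0x3, so address
 * 0x1002 faults while 0x1004 does not. The guard macro is hypothetical.
 */
#ifdef TLB_HELPER_ALIGN_SKETCH
static void sketch_alignment_mask(void)
{
    int bits = memop_alignment_bits(MO_ALIGN | MO_32);   /* 2 */
    g_assert((0x1002 & ((1 << bits) - 1)) != 0);
    g_assert((0x1004 & ((1 << bits) - 1)) == 0);
}
#endif
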
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */
387