1e21b551cSPhilippe Mathieu-Daudé /*
2e21b551cSPhilippe Mathieu-Daudé * ARM TLB (Translation lookaside buffer) helpers.
3e21b551cSPhilippe Mathieu-Daudé *
4e21b551cSPhilippe Mathieu-Daudé * This code is licensed under the GNU GPL v2 or later.
5e21b551cSPhilippe Mathieu-Daudé *
6e21b551cSPhilippe Mathieu-Daudé * SPDX-License-Identifier: GPL-2.0-or-later
7e21b551cSPhilippe Mathieu-Daudé */
8e21b551cSPhilippe Mathieu-Daudé #include "qemu/osdep.h"
9e21b551cSPhilippe Mathieu-Daudé #include "cpu.h"
10e21b551cSPhilippe Mathieu-Daudé #include "internals.h"
115a534314SPeter Maydell #include "cpu-features.h"
12e21b551cSPhilippe Mathieu-Daudé
13*cbf565b0SPierrick Bouvier #define HELPER_H "tcg/helper.h"
14*cbf565b0SPierrick Bouvier #include "exec/helper-proto.h.inc"
15cd6bc4d5SRichard Henderson
16cd6bc4d5SRichard Henderson /*
17cd6bc4d5SRichard Henderson * Returns true if the stage 1 translation regime is using LPAE format page
18cd6bc4d5SRichard Henderson * tables. Used when raising alignment exceptions, whose FSR changes depending
19cd6bc4d5SRichard Henderson * on whether the long or short descriptor format is in use.
20cd6bc4d5SRichard Henderson */
arm_s1_regime_using_lpae_format(CPUARMState * env,ARMMMUIdx mmu_idx)21cd6bc4d5SRichard Henderson bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
22cd6bc4d5SRichard Henderson {
23cd6bc4d5SRichard Henderson mmu_idx = stage_1_mmu_idx(mmu_idx);
24cd6bc4d5SRichard Henderson return regime_using_lpae_format(env, mmu_idx);
25cd6bc4d5SRichard Henderson }
26cd6bc4d5SRichard Henderson
/*
 * Build the final data-abort syndrome (ESR ISS) by merging the
 * translation-time template syndrome with runtime fault information.
 *
 * @template_syn: syndrome template created at translate time
 *                (env->exception.syndrome, EC field already masked out)
 * @fi: fault descriptor filled in by the page-table walk
 * @target_el: EL the exception will be taken to
 * @same_el: true if the fault is taken to the EL it occurred at
 * @is_write: true for a store access
 * @fsc: fault status code for the syndrome's low 6 bits
 */
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_SL64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (template_syn & ARM_EL_VNCR) {
        /*
         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
         * they are always reported as "same EL", even though we are going
         * from EL1 to EL2.
         */
        assert(!fi->stage2);
        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
               || fi->s1ptw || !fi->stage2) {
        /* No valid instruction syndrome: report with ISV clear. */
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
80e21b551cSPhilippe Mathieu-Daudé
/*
 * Compute the guest-visible FSR value for a fault, and return the
 * 6-bit fault status code (for use in a syndrome register) via
 * @ret_fsc. The format (LPAE long-descriptor vs short-descriptor)
 * depends on the target EL and the translation regime in use.
 */
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
118936a6b86SRichard Henderson
report_as_gpc_exception(ARMCPU * cpu,int current_el,ARMMMUFaultInfo * fi)11911b76fdaSRichard Henderson static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
12011b76fdaSRichard Henderson ARMMMUFaultInfo *fi)
12111b76fdaSRichard Henderson {
12211b76fdaSRichard Henderson bool ret;
12311b76fdaSRichard Henderson
12411b76fdaSRichard Henderson switch (fi->gpcf) {
12511b76fdaSRichard Henderson case GPCF_None:
12611b76fdaSRichard Henderson return false;
12711b76fdaSRichard Henderson case GPCF_AddressSize:
12811b76fdaSRichard Henderson case GPCF_Walk:
12911b76fdaSRichard Henderson case GPCF_EABT:
13011b76fdaSRichard Henderson /* R_PYTGX: GPT faults are reported as GPC. */
13111b76fdaSRichard Henderson ret = true;
13211b76fdaSRichard Henderson break;
13311b76fdaSRichard Henderson case GPCF_Fail:
13411b76fdaSRichard Henderson /*
13511b76fdaSRichard Henderson * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
13611b76fdaSRichard Henderson * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
13711b76fdaSRichard Henderson * if SCR_EL3.GPF is set, otherwise an insn or data abort.
13811b76fdaSRichard Henderson */
13911b76fdaSRichard Henderson ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
14011b76fdaSRichard Henderson break;
14111b76fdaSRichard Henderson default:
14211b76fdaSRichard Henderson g_assert_not_reached();
14311b76fdaSRichard Henderson }
14411b76fdaSRichard Henderson
14511b76fdaSRichard Henderson assert(cpu_isar_feature(aa64_rme, cpu));
14611b76fdaSRichard Henderson assert(fi->type == ARMFault_GPCFOnWalk ||
14711b76fdaSRichard Henderson fi->type == ARMFault_GPCFOnOutput);
14811b76fdaSRichard Henderson if (fi->gpcf == GPCF_AddressSize) {
14911b76fdaSRichard Henderson assert(fi->level == 0);
15011b76fdaSRichard Henderson } else {
15111b76fdaSRichard Henderson assert(fi->level >= 0 && fi->level <= 1);
15211b76fdaSRichard Henderson }
15311b76fdaSRichard Henderson
15411b76fdaSRichard Henderson return ret;
15511b76fdaSRichard Henderson }
15611b76fdaSRichard Henderson
/*
 * Encode the GPCSC field of the ESR_EL3 syndrome for a granule
 * protection check: a per-fault-kind base value OR'd with the
 * walk level. fi->gpcf and fi->level were validated by
 * report_as_gpc_exception() before we get here.
 */
static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    unsigned base;

    switch (fi->gpcf) {
    case GPCF_AddressSize:
        base = 0b000000;
        break;
    case GPCF_Walk:
        base = 0b000100;
        break;
    case GPCF_Fail:
        base = 0b001100;
        break;
    case GPCF_EABT:
        base = 0b010100;
        break;
    default:
        g_assert_not_reached();
    }
    return base | fi->level;
}
16911b76fdaSRichard Henderson
/*
 * Deliver an insn or data abort (or a Granule Protection Check
 * exception) for a failed translation. Decides the target EL,
 * constructs the syndrome and FSR, records the faulting address,
 * and raises the exception. Does not return.
 */
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    /*
     * We know this must be a data or insn abort, and that
     * env->exception.syndrome contains the template syndrome set
     * up at translate time. So we can check only the VNCR bit
     * (and indeed syndrome does not have the EC field in it,
     * because we masked that out in disas_set_insn_syndrome())
     */
    bool is_vncr = (access_type != MMU_INST_FETCH) &&
        (env->exception.syndrome & ARM_EL_VNCR);

    if (is_vncr) {
        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
        target_el = 2;
    }

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        /* Granule protection checks are always reported to EL3. */
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), is_vncr,
                      0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        /* Record the faulting physical address plus its address space. */
        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        /* Stage-2 faults go to EL2 and record the IPA in HPFAR_EL2. */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        /* Short-format FSR: bit 11 distinguishes writes from reads. */
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
264e21b551cSPhilippe Mathieu-Daudé
265e21b551cSPhilippe Mathieu-Daudé /* Raise a data fault alignment exception for the specified virtual address */
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };

    /* Unwind guest CPU state to the faulting instruction. */
    cpu_restore_state(cs, retaddr);

    arm_deliver_fault(ARM_CPU(cs), vaddr, access_type, mmu_idx, &fi);
}
279e21b551cSPhilippe Mathieu-Daudé
/* Raise a PC alignment fault for a misaligned instruction address. */
void helper_exception_pc_alignment(CPUARMState *env, vaddr pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el,
                                         arm_env_mmu_index(env), &fsc);

    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}
296ee03027aSRichard Henderson
2970d1762e9SRichard Henderson #if !defined(CONFIG_USER_ONLY)
2980d1762e9SRichard Henderson
299e21b551cSPhilippe Mathieu-Daudé /*
300e21b551cSPhilippe Mathieu-Daudé * arm_cpu_do_transaction_failed: handle a memory system error response
301e21b551cSPhilippe Mathieu-Daudé * (eg "no device/memory present at address") by raising an external abort
302e21b551cSPhilippe Mathieu-Daudé * exception
303e21b551cSPhilippe Mathieu-Daudé */
/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMMMUFaultInfo fi = {
        .type = ARMFault_SyncExternal,
        .ea = arm_extabort_type(response),
    };

    /* Unwind guest CPU state to the faulting instruction. */
    cpu_restore_state(cs, retaddr);

    arm_deliver_fault(ARM_CPU(cs), addr, access_type, mmu_idx, &fi);
}
320e21b551cSPhilippe Mathieu-Daudé
/*
 * TLB fill hook: translate @address for @access_type and, on success,
 * fill in *@out and return true. On failure, return false if @probe,
 * otherwise deliver the fault (does not return in that case).
 * May recurse via the stage-1 page-table walk (see env->tlb_fi).
 */
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Per R_XCHFJ, alignment fault not due to memory type has
     * highest precedence. Otherwise, walk the page table and
     * and collect the page description.
     */
    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
        fi->type = ARMFault_Alignment;
    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                              &res, fi)) {
        /* Success: stash ARM-specific page attributes in the TLB entry. */
        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;
        *out = res.f;
        return true;
    }
    if (probe) {
        return false;
    }

    /* Now we have a real cpu fault. */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
3639b12b6b4SRichard Henderson #else
/*
 * user-only: record a SIGSEGV as a level-3 translation or permission
 * fault, so the guest signal handler sees plausible ESR/FAR values.
 */
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);

    arm_deliver_fault(ARM_CPU(cs), addr, access_type, MMU_USER_IDX, &fi);
}
38139a099caSRichard Henderson
/*
 * user-only: a SIGBUS from the host is reported to the guest as a
 * data alignment fault, reusing the system-mode unaligned-access path.
 */
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
3879b12b6b4SRichard Henderson #endif /* !defined(CONFIG_USER_ONLY) */
388