1e21b551cSPhilippe Mathieu-Daudé /* 2e21b551cSPhilippe Mathieu-Daudé * ARM TLB (Translation lookaside buffer) helpers. 3e21b551cSPhilippe Mathieu-Daudé * 4e21b551cSPhilippe Mathieu-Daudé * This code is licensed under the GNU GPL v2 or later. 5e21b551cSPhilippe Mathieu-Daudé * 6e21b551cSPhilippe Mathieu-Daudé * SPDX-License-Identifier: GPL-2.0-or-later 7e21b551cSPhilippe Mathieu-Daudé */ 8e21b551cSPhilippe Mathieu-Daudé #include "qemu/osdep.h" 9e21b551cSPhilippe Mathieu-Daudé #include "cpu.h" 10e21b551cSPhilippe Mathieu-Daudé #include "internals.h" 115a534314SPeter Maydell #include "cpu-features.h" 12ee03027aSRichard Henderson #include "exec/helper-proto.h" 13e21b551cSPhilippe Mathieu-Daudé 14cd6bc4d5SRichard Henderson 15cd6bc4d5SRichard Henderson /* 16cd6bc4d5SRichard Henderson * Returns true if the stage 1 translation regime is using LPAE format page 17cd6bc4d5SRichard Henderson * tables. Used when raising alignment exceptions, whose FSR changes depending 18cd6bc4d5SRichard Henderson * on whether the long or short descriptor format is in use. 
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

/*
 * Merge the translate-time template syndrome with the runtime fault
 * information in @fi to produce the final data-abort syndrome (ESR ISS).
 * @same_el and @target_el describe where the exception will be taken;
 * @is_write is true for stores; @fsc is the fault status code from
 * compute_fsr_fsc().
 */
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (template_syn & ARM_EL_VNCR) {
        /*
         * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case:
         * they are always reported as "same EL", even though we are going
         * from EL1 to EL2.
         */
        assert(!fi->stage2);
        syn = syn_data_abort_vncr(fi->ea, is_write, fsc);
    } else if (!(template_syn & ARM_EL_ISV) || target_el != 2
               || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

/*
 * Compute the guest-visible FSR value for the fault described by @fi,
 * and store the 6-bit fault status code for the syndrome in *@ret_fsc.
 * Chooses between LPAE (long-descriptor) and short-descriptor FSR
 * formats depending on the target EL and translation regime.
 */
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}

/*
 * Return true if the granule protection fault described by @fi should
 * be reported as a Granule Protection Check exception (taken to EL3),
 * false if it should be reported as an ordinary insn/data abort.
 */
static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    /* GPC faults can only be produced when FEAT_RME is implemented. */
    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}

/*
 * Encode fi->gpcf and fi->level into the GPCSC field of the
 * GPC exception syndrome.
 */
static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk] = 0b000100,
        [GPCF_Fail] = 0b001100,
        [GPCF_EABT] = 0b010100,
    };

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}

/*
 * Deliver the insn/data abort (or GPC exception) described by @fi to
 * the guest: select the target EL, build the FSR and syndrome, update
 * the relevant fault-address registers, and raise the exception.
 * Does not return.
 */
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    /*
     * We know this must be a data or insn abort, and that
     * env->exception.syndrome contains the template syndrome set
     * up at translate time. So we can check only the VNCR bit
     * (and indeed syndrome does not have the EC field in it,
     * because we masked that out in disas_set_insn_syndrome())
     */
    bool is_vncr = (access_type != MMU_INST_FETCH) &&
        (env->exception.syndrome & ARM_EL_VNCR);

    if (is_vncr) {
        /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */
        target_el = 2;
    }

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), is_vncr,
                      0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        /* Record the faulting physical address, tagged with its space. */
        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        /* Stage-2 faults go to EL2 and report the IPA in HPFAR_EL2. */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        /* FSR bit 11 is set only for aborts caused by stores (v6+). */
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/*
 * Raise a PC-alignment exception: report a prefetch abort with the
 * pcalignment syndrome, filling in env->exception.vaddress/fsr first.
 */
void helper_exception_pc_alignment(CPUARMState *env, vaddr pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = arm_env_mmu_index(env);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

/*
 * Translate @address and fill *@out with the page description.
 * Returns true on success; returns false if @probe and the translation
 * failed; otherwise delivers the fault to the guest and does not return.
 */
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Per R_XCHFJ, alignment fault not due to memory type has
     * highest precedence. Otherwise, walk the page table and
     * collect the page description.
     */
    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
        fi->type = ARMFault_Alignment;
    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                              &res, fi)) {
        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;
        *out = res.f;
        return true;
    }
    if (probe) {
        return false;
    }

    /* Now we have a real cpu fault. */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
#else
/*
 * User-mode only: record a SIGSEGV as a translation or permission
 * fault (per @maperr) and deliver it through the normal fault path.
 */
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

/* User-mode only: report a SIGBUS as a data alignment fault. */
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */