Lines Matching +full:power +full:- +full:domain
(full-text search results; the matched lines below come from QEMU's target/arm/ptw.c page table walk code, each shown with its file line number and enclosing function)
6 * SPDX-License-Identifier: GPL-2.0-or-later
12 #include "qemu/main-loop.h"
13 #include "exec/page-protection.h"
15 #include "exec/tlb-flags.h"
19 #include "cpu-features.h"
45 * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
47 * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
104 for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) { in round_down_to_parange_index()
118 * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
123 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in arm_pamax()
125 FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); in arm_pamax()
128 * id_aa64mmfr0 is a read-only register so values outside of the in arm_pamax()
135 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { in arm_pamax()
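The pamax_map lookup used by arm_pamax() and round_down_to_parange_index() follows the architectural ID_AA64MMFR0_EL1.PARANGE encoding (0 -> 32, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, 5 -> 48, 6 -> 52 bits). A minimal standalone sketch of that mapping; the array name and the sample field value here are illustrative, not taken from the file:

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural PARANGE -> PAMax mapping (52 bits requires FEAT_LPA). */
    static const uint8_t parange_to_pamax[] = { 32, 36, 40, 42, 44, 48, 52 };

    int main(void)
    {
        unsigned parange = 5;   /* hypothetical ID_AA64MMFR0_EL1.PARANGE value */
        printf("PARANGE %u -> %u-bit PAMax\n", parange, parange_to_pamax[parange]);
        return 0;
    }

This is also why round_down_to_parange_index() above iterates from the end of the table downward: it picks the largest supported entry not exceeding the requested width.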
196 s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); in ptw_idx_for_stage_2()
198 s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); in ptw_idx_for_stage_2()
215 return env->cp15.vttbr_el2; in regime_ttbr()
218 return env->cp15.vsttbr_el2; in regime_ttbr()
221 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; in regime_ttbr()
223 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; in regime_ttbr()
235 switch (env->v7m.mpu_ctrl[is_secure] & in regime_translation_disabled()
313 uint64_t gpccr = env->cp15.gpccr_el3; in granule_protection_check()
335 if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) { in granule_protection_check()
344 case 0b00: /* non-shareable */ in granule_protection_check()
346 /* Inner and Outer non-cacheable requires Outer shareable. */ in granule_protection_check()
370 /* Note this field is read-only and fixed at reset. */ in granule_protection_check()
386 tableaddr = env->cp15.gptbr_el3 << 12; in granule_protection_check()
396 align = MAX(pps - l0gptsz + 3, 12); in granule_protection_check()
403 index = extract64(paddress, l0gptsz, pps - l0gptsz); in granule_protection_check()
419 align = MAX(l0gptsz - pgs - 1, 12); in granule_protection_check()
431 index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4); in granule_protection_check()
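The level-0 and level-1 GPT index calculations above can be checked with concrete numbers. A hedged worked example, assuming a 48-bit physical address size (pps = 48), the minimum 1GB level-0 region size (l0gptsz = 30) and 4KB granules (pgs = 12); the sketch only reproduces the arithmetic, not the fault checks:

    #include <assert.h>
    #include <stdint.h>

    /* Same bit-extraction semantics as QEMU's extract64(). */
    static uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    int main(void)
    {
        const int pps = 48, l0gptsz = 30, pgs = 12;
        uint64_t paddress = 0x80050000ULL;   /* arbitrary example PA */

        /* Level 0: one 8-byte entry per 2^l0gptsz block of PA space. */
        int l0_align = (pps - l0gptsz + 3 > 12) ? pps - l0gptsz + 3 : 12;
        uint64_t l0_index = extract64(paddress, l0gptsz, pps - l0gptsz);
        assert(l0_align == 21);      /* 2^18 entries * 8 bytes = 2MB table */
        assert(l0_index == 2);       /* PA lies in the third 1GB block */

        /* Level 1: each 8-byte entry packs sixteen 4-bit GPIs, so it covers
         * 16 granules = 2^(pgs + 4) bytes of PA space. */
        int l1_align = (l0gptsz - pgs - 1 > 12) ? l0gptsz - pgs - 1 : 12;
        uint64_t l1_index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
        assert(l1_align == 17);      /* 128KB level-1 table per 1GB block */
        assert(l1_index == 5);       /* sixth 64KB-covering entry in that table */
        return 0;
    }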
478 fi->gpcf = GPCF_Fail; in granule_protection_check()
481 fi->gpcf = GPCF_EABT; in granule_protection_check()
484 fi->gpcf = GPCF_AddressSize; in granule_protection_check()
487 fi->gpcf = GPCF_Walk; in granule_protection_check()
489 fi->level = level; in granule_protection_check()
490 fi->paddr = paddress; in granule_protection_check()
491 fi->paddr_space = pspace; in granule_protection_check()
498 * This slightly under-decodes the MAIR_ELx field: in S1_attrs_are_device()
537 * Root translations are always single-stage. in S2_security_space()
567 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in S1_ptw_translate()
568 ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; in S1_ptw_translate()
571 ptw->out_virt = addr; in S1_ptw_translate()
573 if (unlikely(ptw->in_debug)) { in S1_ptw_translate()
578 ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); in S1_ptw_translate()
591 ptw->out_phys = s2.f.phys_addr; in S1_ptw_translate()
593 ptw->out_host = NULL; in S1_ptw_translate()
594 ptw->out_rw = false; in S1_ptw_translate()
595 ptw->out_space = s2.f.attrs.space; in S1_ptw_translate()
601 env->tlb_fi = fi; in S1_ptw_translate()
604 &ptw->out_host, &full); in S1_ptw_translate()
605 env->tlb_fi = NULL; in S1_ptw_translate()
610 ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); in S1_ptw_translate()
611 ptw->out_rw = full->prot & PAGE_WRITE; in S1_ptw_translate()
612 pte_attrs = full->extra.arm.pte_attrs; in S1_ptw_translate()
613 ptw->out_space = full->attrs.space; in S1_ptw_translate()
620 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); in S1_ptw_translate()
627 fi->type = ARMFault_Permission; in S1_ptw_translate()
628 fi->s2addr = addr; in S1_ptw_translate()
629 fi->stage2 = true; in S1_ptw_translate()
630 fi->s1ptw = true; in S1_ptw_translate()
631 fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); in S1_ptw_translate()
636 ptw->out_be = regime_translation_big_endian(env, mmu_idx); in S1_ptw_translate()
640 assert(fi->type != ARMFault_None); in S1_ptw_translate()
641 if (fi->type == ARMFault_GPCFOnOutput) { in S1_ptw_translate()
642 fi->type = ARMFault_GPCFOnWalk; in S1_ptw_translate()
644 fi->s2addr = addr; in S1_ptw_translate()
645 fi->stage2 = regime_is_stage2(s2_mmu_idx); in S1_ptw_translate()
646 fi->s1ptw = fi->stage2; in S1_ptw_translate()
647 fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); in S1_ptw_translate()
656 void *host = ptw->out_host; in arm_ldl_ptw()
662 if (ptw->out_be) { in arm_ldl_ptw()
670 .space = ptw->out_space, in arm_ldl_ptw()
671 .secure = arm_space_is_secure(ptw->out_space), in arm_ldl_ptw()
676 if (ptw->out_be) { in arm_ldl_ptw()
677 data = address_space_ldl_be(as, ptw->out_phys, attrs, &result); in arm_ldl_ptw()
679 data = address_space_ldl_le(as, ptw->out_phys, attrs, &result); in arm_ldl_ptw()
682 fi->type = ARMFault_SyncExternalOnWalk; in arm_ldl_ptw()
683 fi->ea = arm_extabort_type(result); in arm_ldl_ptw()
694 void *host = ptw->out_host; in arm_ldq_ptw()
701 if (ptw->out_be) { in arm_ldq_ptw()
707 if (ptw->out_be) { in arm_ldq_ptw()
716 .space = ptw->out_space, in arm_ldq_ptw()
717 .secure = arm_space_is_secure(ptw->out_space), in arm_ldq_ptw()
722 if (ptw->out_be) { in arm_ldq_ptw()
723 data = address_space_ldq_be(as, ptw->out_phys, attrs, &result); in arm_ldq_ptw()
725 data = address_space_ldq_le(as, ptw->out_phys, attrs, &result); in arm_ldq_ptw()
728 fi->type = ARMFault_SyncExternalOnWalk; in arm_ldq_ptw()
729 fi->ea = arm_extabort_type(result); in arm_ldq_ptw()
742 void *host = ptw->out_host; in arm_casq_ptw()
748 .space = ptw->out_space, in arm_casq_ptw()
749 .secure = arm_space_is_secure(ptw->out_space), in arm_casq_ptw()
758 if (ptw->out_be) { in arm_casq_ptw()
759 cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result); in arm_casq_ptw()
761 fi->type = ARMFault_SyncExternalOnWalk; in arm_casq_ptw()
762 fi->ea = arm_extabort_type(result); in arm_casq_ptw()
769 address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result); in arm_casq_ptw()
771 fi->type = ARMFault_SyncExternalOnWalk; in arm_casq_ptw()
772 fi->ea = arm_extabort_type(result); in arm_casq_ptw()
781 cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result); in arm_casq_ptw()
783 fi->type = ARMFault_SyncExternalOnWalk; in arm_casq_ptw()
784 fi->ea = arm_extabort_type(result); in arm_casq_ptw()
791 address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result); in arm_casq_ptw()
793 fi->type = ARMFault_SyncExternalOnWalk; in arm_casq_ptw()
794 fi->ea = arm_extabort_type(result); in arm_casq_ptw()
810 * Raising a stage2 Protection fault for an atomic update to a read-only in arm_casq_ptw()
813 if (unlikely(!ptw->out_rw)) { in arm_casq_ptw()
816 env->tlb_fi = fi; in arm_casq_ptw()
817 flags = probe_access_full_mmu(env, ptw->out_virt, 0, in arm_casq_ptw()
819 arm_to_core_mmu_idx(ptw->in_ptw_idx), in arm_casq_ptw()
821 env->tlb_fi = NULL; in arm_casq_ptw()
830 assert(fi->type != ARMFault_None); in arm_casq_ptw()
831 fi->s2addr = ptw->out_virt; in arm_casq_ptw()
832 fi->stage2 = true; in arm_casq_ptw()
833 fi->s1ptw = true; in arm_casq_ptw()
834 fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx); in arm_casq_ptw()
839 ptw->out_rw = true; in arm_casq_ptw()
842 if (ptw->out_be) { in arm_casq_ptw()
855 /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */ in arm_casq_ptw()
891 * @ap: The 3-bit access permissions (AP[2:0])
892 * @domain_prot: The 2-bit domain access permissions
945 * @ap: The 3-bit access permissions (AP[2:0])
946 * @domain_prot: The 2-bit domain access permissions
957 * @ap: The 2-bit simple AP (AP[2:1])
990 int domain = 0; in get_phys_addr_v5() local
997 if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) { in get_phys_addr_v5()
999 fi->type = ARMFault_Translation; in get_phys_addr_v5()
1006 if (fi->type != ARMFault_None) { in get_phys_addr_v5()
1010 domain = (desc >> 5) & 0x0f; in get_phys_addr_v5()
1011 if (regime_el(env, ptw->in_mmu_idx) == 1) { in get_phys_addr_v5()
1012 dacr = env->cp15.dacr_ns; in get_phys_addr_v5()
1014 dacr = env->cp15.dacr_s; in get_phys_addr_v5()
1016 domain_prot = (dacr >> (domain * 2)) & 3; in get_phys_addr_v5()
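The domain access control lookup above packs sixteen 2-bit fields into DACR; the domain number itself comes from descriptor bits [8:5]. A small hedged sketch of the extraction, with a made-up register value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical DACR: domain 0 is "manager" (0b11), all others "client" (0b01). */
        uint32_t dacr = 0x55555557u;
        int domain = 5;                             /* from descriptor bits [8:5] */
        int domain_prot = (dacr >> (domain * 2)) & 3;

        assert(domain_prot == 1);          /* client: the AP bits are checked */
        assert(((dacr >> 0) & 3) == 3);    /* manager: access allowed without AP check */
        return 0;
    }

A domain whose DACR field is 0b00 takes the ARMFault_Domain path shown below.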
1019 fi->type = ARMFault_Translation; in get_phys_addr_v5()
1026 fi->type = ARMFault_Domain; in get_phys_addr_v5()
1033 result->f.lg_page_size = 20; /* 1MB */ in get_phys_addr_v5()
1047 if (fi->type != ARMFault_None) { in get_phys_addr_v5()
1052 fi->type = ARMFault_Translation; in get_phys_addr_v5()
1057 result->f.lg_page_size = 16; in get_phys_addr_v5()
1062 result->f.lg_page_size = 12; in get_phys_addr_v5()
1070 result->f.lg_page_size = 12; in get_phys_addr_v5()
1076 fi->type = ARMFault_Translation; in get_phys_addr_v5()
1081 result->f.lg_page_size = 10; in get_phys_addr_v5()
1090 result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); in get_phys_addr_v5()
1091 result->f.prot |= result->f.prot ? PAGE_EXEC : 0; in get_phys_addr_v5()
1092 if (!(result->f.prot & (1 << access_type))) { in get_phys_addr_v5()
1094 fi->type = ARMFault_Permission; in get_phys_addr_v5()
1097 result->f.phys_addr = phys_addr; in get_phys_addr_v5()
1100 fi->domain = domain; in get_phys_addr_v5()
1101 fi->level = level; in get_phys_addr_v5()
1110 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_v6()
1118 int domain = 0; in get_phys_addr_v6() local
1129 fi->type = ARMFault_Translation; in get_phys_addr_v6()
1136 if (fi->type != ARMFault_None) { in get_phys_addr_v6()
1144 fi->type = ARMFault_Translation; in get_phys_addr_v6()
1149 domain = (desc >> 5) & 0x0f; in get_phys_addr_v6()
1152 dacr = env->cp15.dacr_ns; in get_phys_addr_v6()
1154 dacr = env->cp15.dacr_s; in get_phys_addr_v6()
1159 domain_prot = (dacr >> (domain * 2)) & 3; in get_phys_addr_v6()
1161 /* Section or Page domain fault */ in get_phys_addr_v6()
1162 fi->type = ARMFault_Domain; in get_phys_addr_v6()
1171 result->f.lg_page_size = 24; /* 16MB */ in get_phys_addr_v6()
1175 result->f.lg_page_size = 20; /* 1MB */ in get_phys_addr_v6()
1192 if (fi->type != ARMFault_None) { in get_phys_addr_v6()
1198 fi->type = ARMFault_Translation; in get_phys_addr_v6()
1203 result->f.lg_page_size = 16; in get_phys_addr_v6()
1208 result->f.lg_page_size = 12; in get_phys_addr_v6()
1215 out_space = ptw->in_space; in get_phys_addr_v6()
1219 * the CPU doesn't support TZ or this is a non-secure translation in get_phys_addr_v6()
1220 * regime, because the output space will already be non-secure. in get_phys_addr_v6()
1225 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; in get_phys_addr_v6()
1234 fi->type = ARMFault_AccessFlag; in get_phys_addr_v6()
1244 result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw, in get_phys_addr_v6()
1245 xn, pxn, result->f.attrs.space, out_space); in get_phys_addr_v6()
1246 if (!(result->f.prot & (1 << access_type))) { in get_phys_addr_v6()
1248 fi->type = ARMFault_Permission; in get_phys_addr_v6()
1252 result->f.attrs.space = out_space; in get_phys_addr_v6()
1253 result->f.attrs.secure = arm_space_is_secure(out_space); in get_phys_addr_v6()
1254 result->f.phys_addr = phys_addr; in get_phys_addr_v6()
1257 fi->domain = domain; in get_phys_addr_v6()
1258 fi->level = level; in get_phys_addr_v6()
1265 * @s2ap: The 2-bit stage2 access permissions (S2AP)
1266 * @xn: XN (execute-never) bits
1323 * @xn: XN (execute-never) bit
1324 * @pxn: PXN (privileged execute-never) bit
1363 * R_ZWRVD: permission fault for insn fetched from non-Root, in get_S1prot()
1369 * R_PKTDS: permission fault for insn fetched from non-Realm, in get_S1prot()
1384 if (env->cp15.scr_el3 & SCR_SIF) { in get_S1prot()
1453 * If the sign-extend bit is not the same as t0sz[3], the result in aa32_va_parameters()
1508 * @stride: Page-table stride (See the ARM ARM)
1533 startlevel = -1; in check_s2_mmu_setup()
1535 startlevel = 2 - sl0; in check_s2_mmu_setup()
1564 startlevel = 3 - sl0; in check_s2_mmu_setup()
1576 startlevel = 3 - sl0; in check_s2_mmu_setup()
1591 startlevel = 2 - sl0; in check_s2_mmu_setup()
1595 levels = 3 - startlevel; in check_s2_mmu_setup()
1599 s1_max_iasize = s1_min_iasize + (stride - 1) + 4; in check_s2_mmu_setup()
1630 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); in nv_nv1_enabled()
1640 * of a long-format DFSR/IFSR fault register, with the following caveat:
1657 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_lpae()
1698 addrsize = 64 - 8 * param.tbi; in get_phys_addr_lpae()
1699 inputsize = 64 - param.tsz; in get_phys_addr_lpae()
1703 * ID_AA64MMFR0 is a read-only register so values outside of the in get_phys_addr_lpae()
1706 ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); in get_phys_addr_lpae()
1722 inputsize = addrsize - param.tsz; in get_phys_addr_lpae()
1737 addrsize - inputsize); in get_phys_addr_lpae()
1738 if (-top_bits != param.select) { in get_phys_addr_lpae()
1744 stride = arm_granule_bits(param.gran) - 3; in get_phys_addr_lpae()
1751 * implement any ASID-like capability so we can ignore it (instead in get_phys_addr_lpae()
1764 * Note: This is always 0 on 64-bit EL2 and EL3. in get_phys_addr_lpae()
1775 * level = 4 - RoundUp((inputsize - grainsize) / stride) in get_phys_addr_lpae()
1778 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: in get_phys_addr_lpae()
1779 * = 4 - (inputsize - stride - 3 + stride - 1) / stride in get_phys_addr_lpae()
1780 * = 4 - (inputsize - 4) / stride; in get_phys_addr_lpae()
1782 level = 4 - (inputsize - 4) / stride; in get_phys_addr_lpae()
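The simplified start-level formula derived in the comment above can be sanity-checked with concrete granule sizes. A minimal sketch, assuming the usual stride values (granule bits minus 3, as computed from arm_granule_bits() further up):

    #include <assert.h>

    /* level = 4 - (inputsize - 4) / stride, as derived in the comment. */
    static int start_level(int inputsize, int stride)
    {
        return 4 - (inputsize - 4) / stride;
    }

    int main(void)
    {
        /* 4KB granule: stride 9 */
        assert(start_level(48, 9) == 0);    /* 48-bit IA: walk starts at level 0 */
        assert(start_level(39, 9) == 1);    /* 39-bit IA (T0SZ = 25): level 1 */
        assert(start_level(30, 9) == 2);    /* 30-bit IA: level 2 */
        /* 64KB granule: stride 13 */
        assert(start_level(48, 13) == 1);   /* 48-bit IA with 64KB pages: level 1 */
        return 0;
    }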
1794 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); in get_phys_addr_lpae()
1810 fi->type = ARMFault_AddressSize; in get_phys_addr_lpae()
1816 * and also to mask out CnP (bit 0) which could validly be non-zero. in get_phys_addr_lpae()
1826 * the highest bits of a 52-bit output are placed elsewhere. in get_phys_addr_lpae()
1839 descaddr |= (address >> (stride * (4 - level))) & indexmask; in get_phys_addr_lpae()
1848 if (ptw->in_space == ARMSS_Secure in get_phys_addr_lpae()
1852 * Stage2_S -> Stage2 or Phys_S -> Phys_NS in get_phys_addr_lpae()
1853 * Assert the relative order of the secure/non-secure indexes. in get_phys_addr_lpae()
1857 ptw->in_ptw_idx += 1; in get_phys_addr_lpae()
1858 ptw->in_space = ARMSS_NonSecure; in get_phys_addr_lpae()
1865 if (fi->type != ARMFault_None) { in get_phys_addr_lpae()
1893 fi->type = ARMFault_AddressSize; in get_phys_addr_lpae()
1921 page_size = (1ULL << ((stride * (4 - level)) + 3)); in get_phys_addr_lpae()
1922 descaddr &= ~(hwaddr)(page_size - 1); in get_phys_addr_lpae()
1923 descaddr |= (address & (page_size - 1)); in get_phys_addr_lpae()
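The page_size computation above yields the familiar block and page sizes for each final level. A worked sketch, again assuming a 4KB granule (stride 9):

    #include <assert.h>
    #include <stdint.h>

    /* Size mapped by a leaf descriptor reached at `level` with the given stride. */
    static uint64_t leaf_size(int stride, int level)
    {
        return 1ULL << ((stride * (4 - level)) + 3);
    }

    int main(void)
    {
        assert(leaf_size(9, 3) == 4096);                   /* 4KB page */
        assert(leaf_size(9, 2) == 2 * 1024 * 1024);        /* 2MB block */
        assert(leaf_size(9, 1) == 1024ULL * 1024 * 1024);  /* 1GB block */
        return 0;
    }

The two preceding lines then mask descaddr down to that boundary and merge the low bits of the input address back in.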
1925 if (likely(!ptw->in_debug)) { in get_phys_addr_lpae()
1935 fi->type = ARMFault_AccessFlag; in get_phys_addr_lpae()
1942 * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP in get_phys_addr_lpae()
1977 out_space = ptw->in_space; in get_phys_addr_lpae()
1982 * R_YMCSL: Executing an insn fetched from non-Realm causes in get_phys_addr_lpae()
1987 result->f.prot = get_S2prot_noexecute(ap); in get_phys_addr_lpae()
1990 result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); in get_phys_addr_lpae()
1993 result->cacheattrs.is_s2_format = true; in get_phys_addr_lpae()
1994 result->cacheattrs.attrs = extract32(attrs, 2, 4); in get_phys_addr_lpae()
2000 result->cacheattrs.attrs); in get_phys_addr_lpae()
2038 * NS changes the output to non-secure space. in get_phys_addr_lpae()
2076 * Note that we modified ptw->in_space earlier for NSTable, but in get_phys_addr_lpae()
2077 * result->f.attrs retains a copy of the original security space. in get_phys_addr_lpae()
2079 result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, in get_phys_addr_lpae()
2080 xn, pxn, result->f.attrs.space, out_space); in get_phys_addr_lpae()
2084 mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; in get_phys_addr_lpae()
2086 result->cacheattrs.is_s2_format = false; in get_phys_addr_lpae()
2087 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); in get_phys_addr_lpae()
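For the stage-1 path, the attribute index from the descriptor simply selects one byte of MAIR_ELx. A hedged example with a made-up MAIR value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical MAIR_EL1: Attr0 = 0x00 (Device-nGnRnE),
         * Attr1 = 0x44 (Normal Non-cacheable), Attr2 = 0xff (Normal Write-Back). */
        uint64_t mair = 0x0000000000ff4400ULL;
        int attrindx = 2;                        /* AttrIndx from the descriptor */
        uint8_t attrs = (mair >> (attrindx * 8)) & 0xff;

        assert(attrs == 0xff);                   /* Normal, Write-Back cacheable */
        return 0;
    }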
2091 result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ in get_phys_addr_lpae()
2093 device = S1_attrs_are_device(result->cacheattrs.attrs); in get_phys_addr_lpae()
2101 * - Alignment fault caused by the memory type in get_phys_addr_lpae()
2102 * - Permission fault in get_phys_addr_lpae()
2103 * - A stage 2 fault on the memory access in get_phys_addr_lpae()
2107 * non-device path so that tlb_fill_flags is consistent in the in get_phys_addr_lpae()
2117 if (address & ((1 << a_bits) - 1)) { in get_phys_addr_lpae()
2118 fi->type = ARMFault_Alignment; in get_phys_addr_lpae()
2121 result->f.tlb_fill_flags = TLB_CHECK_ALIGNED; in get_phys_addr_lpae()
2123 result->f.tlb_fill_flags = 0; in get_phys_addr_lpae()
2126 if (!(result->f.prot & (1 << access_type))) { in get_phys_addr_lpae()
2127 fi->type = ARMFault_Permission; in get_phys_addr_lpae()
2134 if (fi->type != ARMFault_None) { in get_phys_addr_lpae()
2138 * I_YZSVV says that if the in-memory descriptor has changed, in get_phys_addr_lpae()
2150 result->f.attrs.space = out_space; in get_phys_addr_lpae()
2151 result->f.attrs.secure = arm_space_is_secure(out_space); in get_phys_addr_lpae()
2155 * was re-purposed for output address bits. The SH attribute in in get_phys_addr_lpae()
2159 result->cacheattrs.shareability = param.sh; in get_phys_addr_lpae()
2161 result->cacheattrs.shareability = extract32(attrs, 8, 2); in get_phys_addr_lpae()
2164 result->f.phys_addr = descaddr; in get_phys_addr_lpae()
2165 result->f.lg_page_size = ctz64(page_size); in get_phys_addr_lpae()
2169 fi->type = ARMFault_Translation; in get_phys_addr_lpae()
2171 if (fi->s1ptw) { in get_phys_addr_lpae()
2172 /* Retain the existing stage 2 fi->level */ in get_phys_addr_lpae()
2173 assert(fi->stage2); in get_phys_addr_lpae()
2175 fi->level = level; in get_phys_addr_lpae()
2176 fi->stage2 = regime_is_stage2(mmu_idx); in get_phys_addr_lpae()
2178 fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx); in get_phys_addr_lpae()
2192 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_pmsav5()
2195 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { in get_phys_addr_pmsav5()
2197 result->f.phys_addr = address; in get_phys_addr_pmsav5()
2198 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; in get_phys_addr_pmsav5()
2202 result->f.phys_addr = address; in get_phys_addr_pmsav5()
2203 for (n = 7; n >= 0; n--) { in get_phys_addr_pmsav5()
2204 base = env->cp15.c6_region[n]; in get_phys_addr_pmsav5()
2211 mask = (mask << 1) - 1; in get_phys_addr_pmsav5()
2217 fi->type = ARMFault_Background; in get_phys_addr_pmsav5()
2222 mask = env->cp15.pmsav5_insn_ap; in get_phys_addr_pmsav5()
2224 mask = env->cp15.pmsav5_data_ap; in get_phys_addr_pmsav5()
2229 fi->type = ARMFault_Permission; in get_phys_addr_pmsav5()
2230 fi->level = 1; in get_phys_addr_pmsav5()
2234 fi->type = ARMFault_Permission; in get_phys_addr_pmsav5()
2235 fi->level = 1; in get_phys_addr_pmsav5()
2238 result->f.prot = PAGE_READ | PAGE_WRITE; in get_phys_addr_pmsav5()
2241 result->f.prot = PAGE_READ; in get_phys_addr_pmsav5()
2243 result->f.prot |= PAGE_WRITE; in get_phys_addr_pmsav5()
2247 result->f.prot = PAGE_READ | PAGE_WRITE; in get_phys_addr_pmsav5()
2251 fi->type = ARMFault_Permission; in get_phys_addr_pmsav5()
2252 fi->level = 1; in get_phys_addr_pmsav5()
2255 result->f.prot = PAGE_READ; in get_phys_addr_pmsav5()
2258 result->f.prot = PAGE_READ; in get_phys_addr_pmsav5()
2262 fi->type = ARMFault_Permission; in get_phys_addr_pmsav5()
2263 fi->level = 1; in get_phys_addr_pmsav5()
2266 result->f.prot |= PAGE_EXEC; in get_phys_addr_pmsav5()
2288 * The architecture specifies which regions are execute-never; in get_phys_addr_pmsav7_default()
2312 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ in m_is_ppb_region()
2321 * 0xe0000000 - 0xffffffff in m_is_system_region()
2333 CPUARMState *env = &cpu->env; in pmsav7_use_background_region()
2340 return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; in pmsav7_use_background_region()
2359 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_pmsav7()
2361 bool secure = arm_space_is_secure(ptw->in_space); in get_phys_addr_pmsav7()
2363 result->f.phys_addr = address; in get_phys_addr_pmsav7()
2364 result->f.lg_page_size = TARGET_PAGE_BITS; in get_phys_addr_pmsav7()
2365 result->f.prot = 0; in get_phys_addr_pmsav7()
2367 if (regime_translation_disabled(env, mmu_idx, ptw->in_space) || in get_phys_addr_pmsav7()
2377 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); in get_phys_addr_pmsav7()
2379 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { in get_phys_addr_pmsav7()
2381 uint32_t base = env->pmsav7.drbar[n]; in get_phys_addr_pmsav7()
2382 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); in get_phys_addr_pmsav7()
2386 if (!(env->pmsav7.drsr[n] & 0x1)) { in get_phys_addr_pmsav7()
2396 rmask = (1ull << rsize) - 1; in get_phys_addr_pmsav7()
2419 result->f.lg_page_size = 0; in get_phys_addr_pmsav7()
2430 rsize -= 3; /* sub region size (power of 2) */ in get_phys_addr_pmsav7()
2431 snd = ((address - base) >> rsize) & 0x7; in get_phys_addr_pmsav7()
2432 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); in get_phys_addr_pmsav7()
2443 int snd_rounded = snd & ~(i - 1); in get_phys_addr_pmsav7()
2444 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], in get_phys_addr_pmsav7()
2457 result->f.lg_page_size = rsize; in get_phys_addr_pmsav7()
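The subregion-disable logic above splits a PMSAv7 region into eight equal parts. A worked sketch, assuming rsize already holds log2 of the region size at this point (the conversion from the DRSR size field is not among the matched lines; the base address and access address are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base = 0x20000000;     /* hypothetical DRBAR region base */
        int rsize = 17;                 /* log2(region size): a 128KB region */
        uint32_t address = 0x2000a000;  /* access 40KB into the region */

        int sub = rsize - 3;                        /* sub region size (power of 2): 16KB */
        int snd = ((address - base) >> sub) & 0x7;  /* sub region number */

        assert(snd == 2);   /* falls in subregion 2; DRSR bit (8 + 2) would disable it */
        return 0;
    }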
2462 if (n == -1) { /* no hits */ in get_phys_addr_pmsav7()
2465 fi->type = ARMFault_Background; in get_phys_addr_pmsav7()
2469 &result->f.prot); in get_phys_addr_pmsav7()
2471 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); in get_phys_addr_pmsav7()
2472 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); in get_phys_addr_pmsav7()
2486 result->f.prot |= PAGE_WRITE; in get_phys_addr_pmsav7()
2490 result->f.prot |= PAGE_READ | PAGE_EXEC; in get_phys_addr_pmsav7()
2495 result->f.prot |= PAGE_READ | PAGE_EXEC; in get_phys_addr_pmsav7()
2511 result->f.prot |= PAGE_WRITE; in get_phys_addr_pmsav7()
2515 result->f.prot |= PAGE_READ | PAGE_EXEC; in get_phys_addr_pmsav7()
2520 result->f.prot |= PAGE_READ | PAGE_EXEC; in get_phys_addr_pmsav7()
2533 result->f.prot &= ~PAGE_EXEC; in get_phys_addr_pmsav7()
2538 fi->type = ARMFault_Permission; in get_phys_addr_pmsav7()
2539 fi->level = 1; in get_phys_addr_pmsav7()
2540 return !(result->f.prot & (1 << access_type)); in get_phys_addr_pmsav7()
2547 return env->pmsav8.hprbar; in regime_rbar()
2549 return env->pmsav8.rbar[secure]; in regime_rbar()
2557 return env->pmsav8.hprlar; in regime_rlar()
2559 return env->pmsav8.rlar[secure]; in regime_rlar()
2570 * that a full phys-to-virt translation does). in pmsav8_mpu_lookup()
2572 * or -1 if no region number is returned (MPU off, address did not in pmsav8_mpu_lookup()
2581 int matchregion = -1; in pmsav8_mpu_lookup()
2584 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); in pmsav8_mpu_lookup()
2588 region_counter = cpu->pmsav8r_hdregion; in pmsav8_mpu_lookup()
2590 region_counter = cpu->pmsav7_dregion; in pmsav8_mpu_lookup()
2593 result->f.lg_page_size = TARGET_PAGE_BITS; in pmsav8_mpu_lookup()
2594 result->f.phys_addr = address; in pmsav8_mpu_lookup()
2595 result->f.prot = 0; in pmsav8_mpu_lookup()
2597 *mregion = -1; in pmsav8_mpu_lookup()
2601 fi->stage2 = true; in pmsav8_mpu_lookup()
2626 fi->level = 0; in pmsav8_mpu_lookup()
2629 for (n = region_counter - 1; n >= 0; n--) { in pmsav8_mpu_lookup()
2633 * with bits [x-1:0] all zeroes, but the limit address is bits in pmsav8_mpu_lookup()
2635 * 5 for Cortex-M and 6 for Cortex-R in pmsav8_mpu_lookup()
2656 ranges_overlap(base, limit - base + 1, in pmsav8_mpu_lookup()
2659 result->f.lg_page_size = 0; in pmsav8_mpu_lookup()
2665 result->f.lg_page_size = 0; in pmsav8_mpu_lookup()
2668 if (matchregion != -1) { in pmsav8_mpu_lookup()
2670 * Multiple regions match -- always a failure (unlike in pmsav8_mpu_lookup()
2671 * PMSAv7 where highest-numbered-region wins) in pmsav8_mpu_lookup()
2673 fi->type = ARMFault_Permission; in pmsav8_mpu_lookup()
2675 fi->level = 1; in pmsav8_mpu_lookup()
2687 fi->type = ARMFault_Background; in pmsav8_mpu_lookup()
2689 fi->type = ARMFault_Permission; in pmsav8_mpu_lookup()
2694 if (matchregion == -1) { in pmsav8_mpu_lookup()
2696 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); in pmsav8_mpu_lookup()
2714 result->f.prot = simple_ap_to_rw_prot_is_user(ap, in pmsav8_mpu_lookup()
2717 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); in pmsav8_mpu_lookup()
2722 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; in pmsav8_mpu_lookup()
2726 result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) { in pmsav8_mpu_lookup()
2735 result->cacheattrs.is_s2_format = false; in pmsav8_mpu_lookup()
2736 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); in pmsav8_mpu_lookup()
2737 result->cacheattrs.shareability = sh; in pmsav8_mpu_lookup()
2740 if (result->f.prot && !xn && !(pxn && !is_user)) { in pmsav8_mpu_lookup()
2741 result->f.prot |= PAGE_EXEC; in pmsav8_mpu_lookup()
2749 fi->type = ARMFault_Permission; in pmsav8_mpu_lookup()
2751 fi->level = 1; in pmsav8_mpu_lookup()
2753 return !(result->f.prot & (1 << access_type)); in pmsav8_mpu_lookup()
2779 * We assume the caller has zero-initialized *sattrs. in v8m_security_lookup()
2786 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); in v8m_security_lookup()
2788 if (cpu->idau) { in v8m_security_lookup()
2789 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); in v8m_security_lookup()
2790 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); in v8m_security_lookup()
2792 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, in v8m_security_lookup()
2802 sattrs->ns = !is_secure; in v8m_security_lookup()
2807 sattrs->irvalid = true; in v8m_security_lookup()
2808 sattrs->iregion = idau_region; in v8m_security_lookup()
2811 switch (env->sau.ctrl & 3) { in v8m_security_lookup()
2815 sattrs->ns = true; in v8m_security_lookup()
2818 for (r = 0; r < cpu->sau_sregion; r++) { in v8m_security_lookup()
2819 if (env->sau.rlar[r] & 1) { in v8m_security_lookup()
2820 uint32_t base = env->sau.rbar[r] & ~0x1f; in v8m_security_lookup()
2821 uint32_t limit = env->sau.rlar[r] | 0x1f; in v8m_security_lookup()
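SAU regions have 32-byte granularity: the base comes from SAU_RBAR with the low five bits cleared, the limit from SAU_RLAR with them set, while RLAR bit 0 enables the region and bit 1 marks it Non-secure callable. A hedged example with made-up register values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sau_rbar = 0x20000000;  /* hypothetical SAU_RBAR */
        uint32_t sau_rlar = 0x2000ffe3;  /* hypothetical SAU_RLAR: ENABLE = 1, NSC = 1 */

        uint32_t base  = sau_rbar & ~0x1fu;
        uint32_t limit = sau_rlar | 0x1fu;

        assert(base == 0x20000000);
        assert(limit == 0x2000ffff);     /* the region covers a 64KB window */
        assert(sau_rlar & 1);            /* bit 0: region enabled */
        assert(sau_rlar & 2);            /* bit 1: Non-secure callable */
        return 0;
    }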
2825 sattrs->subpage = true; in v8m_security_lookup()
2827 if (sattrs->srvalid) { in v8m_security_lookup()
2830 * as Secure, not NS-Callable, with no valid region in v8m_security_lookup()
2833 sattrs->ns = false; in v8m_security_lookup()
2834 sattrs->nsc = false; in v8m_security_lookup()
2835 sattrs->sregion = 0; in v8m_security_lookup()
2836 sattrs->srvalid = false; in v8m_security_lookup()
2839 if (env->sau.rlar[r] & 2) { in v8m_security_lookup()
2840 sattrs->nsc = true; in v8m_security_lookup()
2842 sattrs->ns = true; in v8m_security_lookup()
2844 sattrs->srvalid = true; in v8m_security_lookup()
2845 sattrs->sregion = r; in v8m_security_lookup()
2858 ranges_overlap(base, limit - base + 1, in v8m_security_lookup()
2861 sattrs->subpage = true; in v8m_security_lookup()
2874 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { in v8m_security_lookup()
2875 sattrs->ns = false; in v8m_security_lookup()
2876 sattrs->nsc = idau_nsc; in v8m_security_lookup()
2889 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_pmsav8()
2890 bool secure = arm_space_is_secure(ptw->in_space); in get_phys_addr_pmsav8()
2917 fi->type = ARMFault_QEMU_NSCExec; in get_phys_addr_pmsav8()
2919 fi->type = ARMFault_QEMU_SFault; in get_phys_addr_pmsav8()
2921 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; in get_phys_addr_pmsav8()
2922 result->f.phys_addr = address; in get_phys_addr_pmsav8()
2923 result->f.prot = 0; in get_phys_addr_pmsav8()
2933 result->f.attrs.secure = false; in get_phys_addr_pmsav8()
2934 result->f.attrs.space = ARMSS_NonSecure; in get_phys_addr_pmsav8()
2946 fi->type = ARMFault_QEMU_SFault; in get_phys_addr_pmsav8()
2947 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; in get_phys_addr_pmsav8()
2948 result->f.phys_addr = address; in get_phys_addr_pmsav8()
2949 result->f.prot = 0; in get_phys_addr_pmsav8()
2958 result->f.lg_page_size = 0; in get_phys_addr_pmsav8()
2964 * Translate from the 4-bit stage 2 representation of
2965 * memory attributes (without cache-allocation hints) to
2966 * the 8-bit representation of the stage 1 MAIR registers
2980 hiattr = loattr = 1; /* non-cacheable */ in convert_stage2_attrs()
2982 if (hiattr != 1) { /* Write-through or write-back */ in convert_stage2_attrs()
2985 if (loattr != 1) { /* Write-through or write-back */ in convert_stage2_attrs()
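These conversions expand the 4-bit stage-2 attributes into the stage-1 MAIR byte layout (outer attributes in bits [7:4], inner in [3:0]), adding an RW-allocate hint for cacheable types since stage 2 carries no allocation hints. A hedged sketch of two common Normal-memory cases, assuming the cache-disable override seen a few lines above is not taken; the helper name is illustrative:

    #include <assert.h>
    #include <stdint.h>

    /* Simplified model for Normal memory: write-through/write-back gain an
     * RW-allocate hint, the non-cacheable encoding (0b01) stays hint-less. */
    static uint8_t s2_to_mair_normal(uint8_t s2attrs)
    {
        uint8_t hi = (s2attrs >> 2) & 3, lo = s2attrs & 3;
        uint8_t hihint = (hi != 1) ? 3 : 0, lohint = (lo != 1) ? 3 : 0;
        return (hi << 6) | (hihint << 4) | (lo << 2) | lohint;
    }

    int main(void)
    {
        assert(s2_to_mair_normal(0xf) == 0xff);  /* WB/WB -> Normal WB RW-allocate */
        assert(s2_to_mair_normal(0x5) == 0x44);  /* NC/NC -> Normal Non-cacheable */
        return 0;
    }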
2996 * memory, according to table D4-42 and pseudocode procedure
3005 /* non-cacheable has precedence */ in combine_cacheattr_nibble()
3008 /* stage 1 write-through takes precedence */ in combine_cacheattr_nibble()
3011 /* stage 2 write-through takes precedence, but the allocation hint in combine_cacheattr_nibble()
3015 } else { /* write-back */ in combine_cacheattr_nibble()
3048 /* non-Reordering has precedence over Reordering */ in combined_attrs_nofwb()
3051 /* non-Gathering has precedence over Gathering */ in combined_attrs_nofwb()
3068 * in MAIR format, return a value specifying Normal Write-Back, in force_cacheattr_nibble_wb()
3075 * 4 == Non-cacheable in force_cacheattr_nibble_wb()
3076 * Either way, force Write-Back RW allocate non-transient in force_cacheattr_nibble_wb()
3099 * Force Normal Write-Back. Note that if S1 is Normal cacheable in combined_attrs_fwb()
3101 * RW allocate, non-transient. in combined_attrs_fwb()
3111 /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ in combined_attrs_fwb()
3150 /* Combine shareability attributes (table D4-43) */ in combine_cacheattrs()
3152 /* if either are outer-shareable, the result is outer-shareable */ in combine_cacheattrs()
3155 /* if either are inner-shareable, the result is inner-shareable */ in combine_cacheattrs()
3158 /* both non-shareable */ in combine_cacheattrs()
3173 * Inner Non-cacheable, Outer Non-cacheable is always treated in combine_cacheattrs()
3191 * still checked for bounds -- see AArch64.S1DisabledOutput().
3200 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_disabled()
3202 uint8_t shareability = 0; /* non-shareable */ in get_phys_addr_disabled()
3218 uint64_t tcr = env->cp15.tcr_el[r_el]; in get_phys_addr_disabled()
3228 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { in get_phys_addr_disabled()
3229 fi->type = ARMFault_AddressSize; in get_phys_addr_disabled()
3230 fi->level = 0; in get_phys_addr_disabled()
3231 fi->stage2 = false; in get_phys_addr_disabled()
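Even with translation disabled, the input address is still bounds-checked against PAMax, raising an AddressSize fault if any bit between PAMax and the effective top of the address is set. A small hedged example; the 40-bit PAMax and the assumption that top-byte-ignore is in effect (addrtop = 55) are illustrative:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int pamax = 40, addrtop = 55;   /* TBI in use, so bits [63:56] are ignored */
        uint64_t ok    = 1ULL << 39;    /* highest bit still within a 40-bit PA */
        uint64_t fault = 1ULL << 40;    /* one above PAMax */
        uint64_t mask  = ((1ULL << (addrtop - pamax + 1)) - 1) << pamax;

        assert((ok & mask) == 0);       /* passes the bounds check */
        assert((fault & mask) != 0);    /* would raise ARMFault_AddressSize */
        return 0;
    }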
3244 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ in get_phys_addr_disabled()
3246 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); in get_phys_addr_disabled()
3265 result->cacheattrs.is_s2_format = false; in get_phys_addr_disabled()
3269 result->f.phys_addr = address; in get_phys_addr_disabled()
3270 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; in get_phys_addr_disabled()
3271 result->f.lg_page_size = TARGET_PAGE_BITS; in get_phys_addr_disabled()
3272 result->cacheattrs.shareability = shareability; in get_phys_addr_disabled()
3273 result->cacheattrs.attrs = memattr; in get_phys_addr_disabled()
3285 ARMSecuritySpace in_space = ptw->in_space; in get_phys_addr_twostage()
3299 ipa = result->f.phys_addr; in get_phys_addr_twostage()
3300 ipa_secure = result->f.attrs.secure; in get_phys_addr_twostage()
3301 ipa_space = result->f.attrs.space; in get_phys_addr_twostage()
3303 ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; in get_phys_addr_twostage()
3304 ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; in get_phys_addr_twostage()
3305 ptw->in_space = ipa_space; in get_phys_addr_twostage()
3306 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); in get_phys_addr_twostage()
3312 s1_prot = result->f.prot; in get_phys_addr_twostage()
3313 s1_lgpgsz = result->f.lg_page_size; in get_phys_addr_twostage()
3314 s1_guarded = result->f.extra.arm.guarded; in get_phys_addr_twostage()
3315 cacheattrs1 = result->cacheattrs; in get_phys_addr_twostage()
3320 fi->s2addr = ipa; in get_phys_addr_twostage()
3323 result->f.prot &= s1_prot; in get_phys_addr_twostage()
3341 if (result->f.lg_page_size < TARGET_PAGE_BITS || in get_phys_addr_twostage()
3343 result->f.lg_page_size = 0; in get_phys_addr_twostage()
3344 } else if (result->f.lg_page_size < s1_lgpgsz) { in get_phys_addr_twostage()
3345 result->f.lg_page_size = s1_lgpgsz; in get_phys_addr_twostage()
3353 * Normal Non-Shareable, in get_phys_addr_twostage()
3354 * Inner Write-Back Read-Allocate Write-Allocate, in get_phys_addr_twostage()
3355 * Outer Write-Back Read-Allocate Write-Allocate. in get_phys_addr_twostage()
3363 result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, in get_phys_addr_twostage()
3364 result->cacheattrs); in get_phys_addr_twostage()
3367 result->f.extra.arm.guarded = s1_guarded; in get_phys_addr_twostage()
3370 * Check if IPA translates to secure or non-secure PA space. in get_phys_addr_twostage()
3374 result->f.attrs.secure = in get_phys_addr_twostage()
3375 !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) in get_phys_addr_twostage()
3377 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); in get_phys_addr_twostage()
3378 result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); in get_phys_addr_twostage()
3390 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; in get_phys_addr_nogpc()
3398 result->f.attrs.space = ptw->in_space; in get_phys_addr_nogpc()
3399 result->f.attrs.secure = arm_space_is_secure(ptw->in_space); in get_phys_addr_nogpc()
3417 ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ? in get_phys_addr_nogpc()
3428 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); in get_phys_addr_nogpc()
3442 * translations if mmu_idx is a two-stage regime, and EL2 present. in get_phys_addr_nogpc()
3445 ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; in get_phys_addr_nogpc()
3447 !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { in get_phys_addr_nogpc()
3455 ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space); in get_phys_addr_nogpc()
3459 result->f.attrs.user = regime_is_user(env, mmu_idx); in get_phys_addr_nogpc()
3468 address += env->cp15.fcseidr_s; in get_phys_addr_nogpc()
3470 address += env->cp15.fcseidr_ns; in get_phys_addr_nogpc()
3476 result->f.lg_page_size = TARGET_PAGE_BITS; in get_phys_addr_nogpc()
3487 /* Pre-v7 MPU */ in get_phys_addr_nogpc()
3492 " mmu_idx %u -> %s (prot %c%c%c)\n", in get_phys_addr_nogpc()
3497 result->f.prot & PAGE_READ ? 'r' : '-', in get_phys_addr_nogpc()
3498 result->f.prot & PAGE_WRITE ? 'w' : '-', in get_phys_addr_nogpc()
3499 result->f.prot & PAGE_EXEC ? 'x' : '-'); in get_phys_addr_nogpc()
3506 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { in get_phys_addr_nogpc()
3532 if (!granule_protection_check(env, result->f.phys_addr, in get_phys_addr_gpc()
3533 result->f.attrs.space, fi)) { in get_phys_addr_gpc()
3534 fi->type = ARMFault_GPCFOnOutput; in get_phys_addr_gpc()
3647 return -1; in arm_cpu_get_phys_page()
3656 CPUARMState *env = &cpu->env; in arm_cpu_get_phys_page_attrs_debug()
3661 if (res != -1) { in arm_cpu_get_phys_page_attrs_debug()
3678 return -1; in arm_cpu_get_phys_page_attrs_debug()