/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "internals.h"
#include "cpu-csr.h"

#ifdef CONFIG_TCG
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove sw bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~((0x1UL << (tlb_ps - 12)) - 1);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}
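
/*
 * Worked example for the helper above (illustrative numbers, assuming
 * a 16KiB page, i.e. tlb_ps = 14): one TLB entry covers an even/odd
 * pair of pages, so address bit tlb_ps selects the entry half. For
 * address = 0x12345678:
 *     n        = (0x12345678 >> 14) & 0x1 = 1  ->  tlb_entry1
 *     physical = (tlb_ppn << 12) | (0x12345678 & 0x3fff)
 * The low (tlb_ps - 12) = 2 bits of tlb_ppn were cleared above, so
 * the PPN and the in-page offset never overlap.
 */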

/*
 * One TLB entry holds an adjacent odd/even page pair, so the vpn to
 * compare is the virtual page number divided by 2, i.e. bits [47:15]
 * for a 16KiB page, while the vppn field in the TLB entry contains
 * bits [47:13] and must be shifted to match:
 * virt_vpn = vaddr[47:13]
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[22:15] <==> STLB set index for 16KiB page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
                                       int *prot, target_ulong address)
{
    CPUState *cs = env_cpu(env);
    target_ulong index, phys;
    uint64_t dir_base, dir_width;
    uint64_t base;
    int level;

    if ((address >> 63) & 0x1) {
        base = env->CSR_PGDH;
    } else {
        base = env->CSR_PGDL;
    }
    base &= TARGET_PHYS_MASK;

    for (level = 4; level > 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);

        if (dir_width == 0) {
            continue;
        }

        /* get next level page directory */
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | (index << 3);
        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
            /* base is a huge pte */
            break;
        }
    }

    /* pte */
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /* Huge Page. base is pte */
        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }
    } else {
        /* Normal Page. base points to pte */
        get_dir_base_width(env, &dir_base, &dir_width, 0);
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | (index << 3);
        base = ldq_phys(cs->as, phys);
    }

    /* TODO: check plv and other bits? */

    /* base is pte, in normal pte format */
    if (!FIELD_EX64(base, TLBENTRY, V)) {
        return TLBRET_NOMATCH;
    }

    if (!FIELD_EX64(base, TLBENTRY, D)) {
        *prot = PAGE_READ;
    } else {
        *prot = PAGE_READ | PAGE_WRITE;
    }

    /* get TARGET_PAGE_SIZE aligned physical address */
    base += (address & TARGET_PHYS_MASK) & MAKE_64BIT_MASK(0, dir_base);
    /* mask RPLV, NX, NR bits */
    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
    /* mask other attribute bits */
    *physical = base & TARGET_PAGE_MASK;

    return 0;
}
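
/*
 * Walker example (illustrative; assumes a typical 16KiB, three-level
 * configuration where get_dir_base_width() yields PTbase = 14 and
 * PTwidth = 11 for level 0, Dir1 at base 25 and Dir3 at base 36, both
 * 11 bits wide, with Dir2 and Dir4 zero-width and therefore skipped):
 *     level 3: index = (va >> 36) & 0x7ff  ->  page global directory
 *     level 1: index = (va >> 25) & 0x7ff  ->  page middle directory
 *     level 0: index = (va >> 14) & 0x7ff  ->  pte
 * A directory entry with TLBENTRY.HUGE set ends the loop early; at
 * level 1 in this layout it maps a 1 << 25 = 32MiB huge page.
 */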

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    } else if (is_debug) {
        /*
         * For debugger memory access, we want to do the map when there is a
         * legal mapping, even if the mapping is not yet in the TLB: return 0
         * if there is a valid map, non-zero otherwise.
         */
        return loongarch_page_table_walker(env, physical, prot, address);
    }

    return TLBRET_NOMATCH;
}
#else
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    return TLBRET_NOMATCH;
}
#endif

static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx, int is_debug)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA */
    if (da && !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx, is_debug);
}
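
/*
 * DMW example (illustrative, LA64, with a hypothetical CSR_DMW[0]
 * whose VSEG field is 0x9 and whose PLV0 bit is set): a PLV0 access
 * to 0x9000000010000000 matches the window since address bits [63:60]
 * equal 0x9, and dmw_va2pa() drops the window tag via TARGET_VIRT_MASK,
 * yielding physical address 0x10000000.
 */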

hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false), 1) != 0) {
        return -1;
    }
    return phys_addr;
}
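
/*
 * Usage sketch for the hook above (for illustration only; the real
 * caller is the generic cpu_memory_rw_debug() path): the debugger asks
 * for a page-aligned translation and re-adds the in-page offset itself,
 * roughly:
 *
 *     hwaddr pa = loongarch_cpu_get_phys_page_debug(cs,
 *                                                   va & TARGET_PAGE_MASK);
 *     if (pa != -1) {
 *         pa += va & ~TARGET_PAGE_MASK;
 *     }
 *
 * A return value of -1 means no valid mapping was found in the direct
 * map windows, the TLB, or (for debug accesses) the page tables.
 */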