/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for QEMU
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-csr.h"

#ifdef CONFIG_TCG
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even page of the pair */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Clear the software-managed PPN bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~((0x1UL << (tlb_ps - 12)) - 1);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}
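/*
 * Worked example of the mapping above, with illustrative values only:
 * assume a 16KiB page, i.e. tlb_ps = 14 (and R_TLBENTRY_64_PPN_SHIFT
 * is 12). Bit 14 of the virtual address picks the even (tlb_entry0)
 * or odd (tlb_entry1) page of the pair:
 *
 *     n = (address >> 14) & 0x1;
 *
 * With tlb_ps = 14, two low PPN bits are software-managed and cleared:
 *
 *     tlb_ppn &= ~((0x1UL << (14 - 12)) - 1);      clears bits [1:0]
 *
 * and the physical address combines the PPN, shifted up to bit 12,
 * with the 14-bit page offset:
 *
 *     physical = (tlb_ppn << 12) | (address & 0x3fff);
 */
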
/*
 * One TLB entry holds an adjacent odd/even pair of pages, so the stored
 * VPPN is the virtual page number divided by 2. For a 16KiB page the
 * compared VPN is therefore bits [47:15], while the VPPN field in the
 * TLB entry holds bits [47:13], so an adjustment is needed:
 * virt_vpn = vaddr[47:13]
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[22:15] <==> TLBIDX.index for 16KiB page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}
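/*
 * Worked example for the STLB lookup above, assuming the common 16KiB
 * STLB page size (stlb_ps = 14, R_TLB_MISC_VPPN_SHIFT = 13); numbers
 * are illustrative:
 *
 *     vpn           = vaddr >> (14 + 1)      -> vaddr[47:15]
 *     stlb_idx      = vpn & 0xff             -> set index, vaddr[22:15]
 *     compare_shift = 14 + 1 - 13 = 2
 *
 * so the stored VPPN (vaddr[47:13]) is shifted right by 2 before being
 * compared against vpn, and each of the 8 ways of set stlb_idx is probed.
 */
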
static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
                                       int *prot, target_ulong address)
{
    CPUState *cs = env_cpu(env);
    target_ulong index, phys;
    uint64_t dir_base, dir_width;
    uint64_t base;
    int level;

    if ((address >> 63) & 0x1) {
        base = env->CSR_PGDH;
    } else {
        base = env->CSR_PGDL;
    }
    base &= TARGET_PHYS_MASK;

    for (level = 4; level > 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);

        if (dir_width == 0) {
            continue;
        }

        /* get next level page directory */
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
            /* base is a huge pte */
            break;
        }
    }

    /* pte */
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /* Huge Page. base is the pte */
        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }
    } else {
        /* Normal Page. base points to the pte */
        get_dir_base_width(env, &dir_base, &dir_width, 0);
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys);
    }

    /* TODO: check plv and other bits? */

    /* base is the pte, in normal pte format */
    if (!FIELD_EX64(base, TLBENTRY, V)) {
        return TLBRET_NOMATCH;
    }

    if (!FIELD_EX64(base, TLBENTRY, D)) {
        *prot = PAGE_READ;
    } else {
        *prot = PAGE_READ | PAGE_WRITE;
    }

    /* get TARGET_PAGE_SIZE aligned physical address */
    base += (address & TARGET_PHYS_MASK) & ((1ULL << dir_base) - 1);
    /* mask RPLV, NX, NR bits */
    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
    /* mask other attribute bits */
    *physical = base & TARGET_PAGE_MASK;

    return 0;
}
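/*
 * Sketch of a single walk step, with illustrative numbers only (the
 * real dir_base/dir_width pairs come from get_dir_base_width(), which
 * decodes the page-walk control CSRs). For a hypothetical directory
 * level with dir_base = 25 and dir_width = 11:
 *
 *     index = (address >> 25) & 0x7ff;    entry number in this table
 *     phys  = base | index << 3;          directory entries are 8 bytes
 *     base  = ldq_phys(cs->as, phys);     next-level base, or a huge pte
 *
 * The walk runs from level 4 down to level 1, skipping levels whose
 * dir_width is 0 and stopping early once a descriptor has the HUGE bit
 * set.
 */

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    } else if (is_debug) {
        /*
         * For debugger memory access, we want to do the map when there is a
         * legal mapping, even if the mapping is not yet in the TLB. Return 0
         * if there is a valid map, else nonzero.
         */
        return loongarch_page_table_walker(env, physical, prot, address);
    }

    return TLBRET_NOMATCH;
}
#else
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    return TLBRET_NOMATCH;
}
#endif

static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx, int is_debug)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA */
    if (da && !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx, is_debug);
}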
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false), 1) != 0) {
        return -1;
    }
    return phys_addr;
}