/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-csr.h"

#ifdef CONFIG_TCG
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove the software-managed bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~((0x1UL << (tlb_ps - 12)) - 1);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}
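/*
 * Example of the mapping above, assuming a 16KiB page (tlb_ps = 14):
 * one TLB entry covers an adjacent 32KiB odd/even page pair, and bit 14
 * of the virtual address selects tlb_entry1 (odd) or tlb_entry0 (even).
 * The PPN field is shifted in at bit 12, so its two lowest bits overlap
 * the page offset; they are cleared by the software-bit mask before the
 * final OR, and the low 14 address bits pass through to the physical
 * address unchanged.
 */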
/*
 * One TLB entry holds an adjacent odd/even pair of pages, so its VPN is
 * the virtual page number divided by 2. For a 16KiB page the VPN to
 * compare is therefore bits [47:15], while the VPPN field stored in the
 * TLB entry holds bits [47:13], so the stored value must be shifted
 * before the comparison:
 * virt_vpn = vaddr[47:13]
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* 8-bit set index: VA[22:15] for 16KiB pages */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB: 8 ways of 256 sets each */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
                                       int *prot, target_ulong address)
{
    CPUState *cs = env_cpu(env);
    target_ulong index, phys;
    uint64_t dir_base, dir_width;
    uint64_t base;
    int level;

    if ((address >> 63) & 0x1) {
        base = env->CSR_PGDH;
    } else {
        base = env->CSR_PGDL;
    }
    base &= TARGET_PHYS_MASK;

    for (level = 4; level > 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);

        if (dir_width == 0) {
            continue;
        }

        /* get next level page directory */
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
            /* base is a huge pte */
            break;
        }
    }

    /* pte */
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /* Huge Page. base is pte */
        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }
    } else {
        /* Normal Page. base points to pte */
        get_dir_base_width(env, &dir_base, &dir_width, 0);
        index = (address >> dir_base) & ((1 << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys);
    }
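    /*
     * In both cases base now holds the leaf pte and dir_base the shift
     * of the level where the walk stopped, so the offset mask applied
     * below spans a whole huge page when the walk terminated early.
     */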
    /* TODO: check plv and other bits? */

    /* base is pte, in normal pte format */
    if (!FIELD_EX64(base, TLBENTRY, V)) {
        return TLBRET_NOMATCH;
    }

    if (!FIELD_EX64(base, TLBENTRY, D)) {
        *prot = PAGE_READ;
    } else {
        *prot = PAGE_READ | PAGE_WRITE;
    }

    /* get TARGET_PAGE_SIZE aligned physical address */
    base += (address & TARGET_PHYS_MASK) & ((1ULL << dir_base) - 1);
    /* mask RPLV, NX, NR bits */
    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
    /* mask other attribute bits */
    *physical = base & TARGET_PAGE_MASK;

    return TLBRET_MATCH;
}

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    } else if (is_debug) {
        /*
         * For debugger memory access, we want to do the map when there is a
         * legal mapping, even if the mapping is not yet in the TLB: return 0
         * if there is a valid map, else non-zero.
         */
        return loongarch_page_table_walker(env, physical, prot, address);
    }

    return TLBRET_NOMATCH;
}
#else
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    return TLBRET_NOMATCH;
}
#endif

static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx, int is_debug)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA: direct address translation mode */
    if (da & !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    /* Build a PLV mask: bit 0 for kernel (PLV0), bit 3 for user (PLV3) */
    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map windows */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension: the 16 bits above the VA space must be all
     * zeros or all ones */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx, is_debug);
}
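/*
 * Debug translation helper: returns the physical address backing @addr,
 * or -1 if no valid mapping exists, matching the contract of
 * cpu_get_phys_page_debug().
 */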
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false), 1) != 0) {
        return -1;
    }
    return phys_addr;
}