/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"

bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
    if (tlb_ps >= 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, target_ulong level)
{
    switch (level) {
    case 1:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
        break;
    case 2:
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
        break;
    case 3:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
        break;
    case 4:
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
        break;
    default:
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
        break;
    }
}

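/*
 * Convert a failed TLB lookup into the corresponding exception: pick the
 * exception code from the access type and the failure kind, then latch the
 * faulting address into the TLB refill CSRs (TLBRBADV/TLBREHI) for a refill,
 * or into BADV/TLBEHI otherwise.
 */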
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

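/*
 * Write one TLB entry from CSR state. During a TLB refill exception
 * (CSR_TLBRERA.ISTLBR set) the page size, VPPN and entry-lo values come
 * from TLBREHI/TLBRELO0/TLBRELO1, otherwise from TLBIDX/TLBEHI/TLBELO0/
 * TLBELO1. The ASID is always taken from CSR_ASID.
 */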
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* check csr_ps */
    if (!check_ps(env, csr_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
        return;
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

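/*
 * Write the TLB entry selected by CSR_TLBIDX.INDEX. The old contents of
 * the slot are invalidated first; if TLBIDX.NE is set, the slot is only
 * marked not-existing and nothing new is filled in.
 */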
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    if (!check_ps(env, pagesize)) {
        qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
        return;
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (!check_ps(env, stlb_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
        return;
    }

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

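/*
 * Unlike helper_tlbclr() above, the ASID and global bits are ignored here:
 * an STLB index clears the indexed line in every one of the eight sets,
 * an MTLB index clears all MTLB entries.
 */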
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

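/*
 * Same as helper_invtlb_page_asid() except that global entries match as
 * well: an entry is dropped when it is global or its ASID equals 'info',
 * and its VPPN (shifted down to the entry's own page-pair granularity)
 * covers 'addr'.
 */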
void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}

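/*
 * Load the even/odd page table entry for the refill address into
 * CSR_TLBRELO0/1 and record the resulting page size in CSR_TLBREHI.PS.
 * A huge page entry (HUGE bit set in 'base') is not dereferenced; it is
 * instead rewritten into the even or odd half of the huge page.
 */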
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The parameter "base" is either a page table base address,
     * whose bit 6 is 0, or a huge page entry, whose bit 6 is 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and the huge page size.
         * Clear the huge page level information in the entry.
         * Clear the huge page bit.
         * Move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * Huge pages are split evenly into an even/odd page pair
         * when loaded into the TLB, so the TLB page size is half
         * the huge page size.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}