1 /* 2 * Sparc MMU helpers 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "cpu.h" 23 #include "exec/cputlb.h" 24 #include "accel/tcg/cpu-mmu-index.h" 25 #include "exec/page-protection.h" 26 #include "exec/tlb-flags.h" 27 #include "qemu/qemu-print.h" 28 #include "trace.h" 29 30 /* Sparc MMU emulation */ 31 32 #ifndef TARGET_SPARC64 33 /* 34 * Sparc V8 Reference MMU (SRMMU) 35 */ 36 static const int access_table[8][8] = { 37 { 0, 0, 0, 0, 8, 0, 12, 12 }, 38 { 0, 0, 0, 0, 8, 0, 0, 0 }, 39 { 8, 8, 0, 0, 0, 8, 12, 12 }, 40 { 8, 8, 0, 0, 0, 8, 0, 0 }, 41 { 8, 0, 8, 0, 8, 8, 12, 12 }, 42 { 8, 0, 8, 0, 8, 0, 8, 0 }, 43 { 8, 8, 8, 0, 8, 8, 12, 12 }, 44 { 8, 8, 8, 0, 8, 8, 8, 0 } 45 }; 46 47 static const int perm_table[2][8] = { 48 { 49 PAGE_READ, 50 PAGE_READ | PAGE_WRITE, 51 PAGE_READ | PAGE_EXEC, 52 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 53 PAGE_EXEC, 54 PAGE_READ | PAGE_WRITE, 55 PAGE_READ | PAGE_EXEC, 56 PAGE_READ | PAGE_WRITE | PAGE_EXEC 57 }, 58 { 59 PAGE_READ, 60 PAGE_READ | PAGE_WRITE, 61 PAGE_READ | PAGE_EXEC, 62 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 63 PAGE_EXEC, 64 PAGE_READ, 65 0, 66 0, 67 } 68 }; 69 70 static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, 71 int *access_index, target_ulong address, 72 int rw, int mmu_idx) 73 { 74 int access_perms = 0; 75 hwaddr pde_ptr; 76 uint32_t pde; 77 int error_code = 0, is_dirty, is_user; 78 unsigned long page_offset; 79 CPUState *cs = env_cpu(env); 80 MemTxResult result; 81 82 is_user = mmu_idx == MMU_USER_IDX; 83 84 if (mmu_idx == MMU_PHYS_IDX) { 85 full->lg_page_size = TARGET_PAGE_BITS; 86 /* Boot mode: instruction fetches are taken from PROM */ 87 if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { 88 full->phys_addr = env->prom_addr | (address & 0x7ffffULL); 89 full->prot = PAGE_READ | PAGE_EXEC; 90 return 0; 91 } 92 full->phys_addr = address; 93 full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 94 return 0; 95 } 96 97 *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); 98 full->phys_addr = 0xffffffffffff0000ULL; 99 100 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ 101 /* Context base + context number */ 102 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 103 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 104 if (result != MEMTX_OK) { 105 return 4 << 2; /* Translation fault, L = 0 */ 106 } 107 108 /* Ctx pde */ 109 switch (pde & PTE_ENTRYTYPE_MASK) { 110 default: 111 case 0: /* Invalid */ 112 return 1 << 2; 113 case 2: /* L0 PTE, maybe should not happen? */ 114 case 3: /* Reserved */ 115 return 4 << 2; 116 case 1: /* L0 PDE */ 117 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 118 pde = address_space_ldl(cs->as, pde_ptr, 119 MEMTXATTRS_UNSPECIFIED, &result); 120 if (result != MEMTX_OK) { 121 return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ 122 } 123 124 switch (pde & PTE_ENTRYTYPE_MASK) { 125 default: 126 case 0: /* Invalid */ 127 return (1 << 8) | (1 << 2); 128 case 3: /* Reserved */ 129 return (1 << 8) | (4 << 2); 130 case 1: /* L1 PDE */ 131 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 132 pde = address_space_ldl(cs->as, pde_ptr, 133 MEMTXATTRS_UNSPECIFIED, &result); 134 if (result != MEMTX_OK) { 135 return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ 136 } 137 138 switch (pde & PTE_ENTRYTYPE_MASK) { 139 default: 140 case 0: /* Invalid */ 141 return (2 << 8) | (1 << 2); 142 case 3: /* Reserved */ 143 return (2 << 8) | (4 << 2); 144 case 1: /* L2 PDE */ 145 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 146 pde = address_space_ldl(cs->as, pde_ptr, 147 MEMTXATTRS_UNSPECIFIED, &result); 148 if (result != MEMTX_OK) { 149 return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ 150 } 151 152 switch (pde & PTE_ENTRYTYPE_MASK) { 153 default: 154 case 0: /* Invalid */ 155 return (3 << 8) | (1 << 2); 156 case 1: /* PDE, should not happen */ 157 case 3: /* Reserved */ 158 return (3 << 8) | (4 << 2); 159 case 2: /* L3 PTE */ 160 page_offset = 0; 161 } 162 full->lg_page_size = TARGET_PAGE_BITS; 163 break; 164 case 2: /* L2 PTE */ 165 page_offset = address & 0x3f000; 166 full->lg_page_size = 18; 167 } 168 break; 169 case 2: /* L1 PTE */ 170 page_offset = address & 0xfff000; 171 full->lg_page_size = 24; 172 break; 173 } 174 } 175 176 /* check access */ 177 access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; 178 error_code = access_table[*access_index][access_perms]; 179 if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { 180 return error_code; 181 } 182 183 /* update page modified and dirty bits */ 184 is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); 185 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 186 pde |= PG_ACCESSED_MASK; 187 if (is_dirty) { 188 pde |= PG_MODIFIED_MASK; 189 } 190 stl_phys_notdirty(cs->as, pde_ptr, pde); 191 } 192 193 /* the page can be put in the TLB */ 194 full->prot = perm_table[is_user][access_perms]; 195 if (!(pde & PG_MODIFIED_MASK)) { 196 /* only set write access if already dirty... otherwise wait 197 for dirty access */ 198 full->prot &= ~PAGE_WRITE; 199 } 200 201 /* Even if large ptes, we map only one 4KB page in the cache to 202 avoid filling it too fast */ 203 full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; 204 return error_code; 205 } 206 207 /* Perform address translation */ 208 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 209 MMUAccessType access_type, int mmu_idx, 210 bool probe, uintptr_t retaddr) 211 { 212 CPUSPARCState *env = cpu_env(cs); 213 CPUTLBEntryFull full = {}; 214 target_ulong vaddr; 215 int error_code = 0, access_index; 216 217 /* 218 * TODO: If we ever need tlb_vaddr_to_host for this target, 219 * then we must figure out how to manipulate FSR and FAR 220 * when both MMU_NF and probe are set. In the meantime, 221 * do not support this use case. 222 */ 223 assert(!probe); 224 225 address &= TARGET_PAGE_MASK; 226 error_code = get_physical_address(env, &full, &access_index, 227 address, access_type, mmu_idx); 228 vaddr = address; 229 if (likely(error_code == 0)) { 230 qemu_log_mask(CPU_LOG_MMU, 231 "Translate at %" VADDR_PRIx " -> " 232 HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n", 233 address, full.phys_addr, vaddr); 234 tlb_set_page_full(cs, mmu_idx, vaddr, &full); 235 return true; 236 } 237 238 if (env->mmuregs[3]) { /* Fault status register */ 239 env->mmuregs[3] = 1; /* overflow (not read before another fault) */ 240 } 241 env->mmuregs[3] |= (access_index << 5) | error_code | 2; 242 env->mmuregs[4] = address; /* Fault address register */ 243 244 if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { 245 /* No fault mode: if a mapping is available, just override 246 permissions. If no mapping is available, redirect accesses to 247 neverland. Fake/overridden mappings will be flushed when 248 switching to normal mode. */ 249 full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 250 tlb_set_page_full(cs, mmu_idx, vaddr, &full); 251 return true; 252 } else { 253 if (access_type == MMU_INST_FETCH) { 254 cs->exception_index = TT_TFAULT; 255 } else { 256 cs->exception_index = TT_DFAULT; 257 } 258 cpu_loop_exit_restore(cs, retaddr); 259 } 260 } 261 262 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 263 { 264 CPUState *cs = env_cpu(env); 265 hwaddr pde_ptr; 266 uint32_t pde; 267 MemTxResult result; 268 269 /* 270 * TODO: MMU probe operations are supposed to set the fault 271 * status registers, but we don't do this. 272 */ 273 274 /* Context base + context number */ 275 pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + 276 (env->mmuregs[2] << 2); 277 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 278 if (result != MEMTX_OK) { 279 return 0; 280 } 281 282 switch (pde & PTE_ENTRYTYPE_MASK) { 283 default: 284 case 0: /* Invalid */ 285 case 2: /* PTE, maybe should not happen? */ 286 case 3: /* Reserved */ 287 return 0; 288 case 1: /* L1 PDE */ 289 if (mmulev == 3) { 290 return pde; 291 } 292 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 293 pde = address_space_ldl(cs->as, pde_ptr, 294 MEMTXATTRS_UNSPECIFIED, &result); 295 if (result != MEMTX_OK) { 296 return 0; 297 } 298 299 switch (pde & PTE_ENTRYTYPE_MASK) { 300 default: 301 case 0: /* Invalid */ 302 case 3: /* Reserved */ 303 return 0; 304 case 2: /* L1 PTE */ 305 return pde; 306 case 1: /* L2 PDE */ 307 if (mmulev == 2) { 308 return pde; 309 } 310 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 311 pde = address_space_ldl(cs->as, pde_ptr, 312 MEMTXATTRS_UNSPECIFIED, &result); 313 if (result != MEMTX_OK) { 314 return 0; 315 } 316 317 switch (pde & PTE_ENTRYTYPE_MASK) { 318 default: 319 case 0: /* Invalid */ 320 case 3: /* Reserved */ 321 return 0; 322 case 2: /* L2 PTE */ 323 return pde; 324 case 1: /* L3 PDE */ 325 if (mmulev == 1) { 326 return pde; 327 } 328 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 329 pde = address_space_ldl(cs->as, pde_ptr, 330 MEMTXATTRS_UNSPECIFIED, &result); 331 if (result != MEMTX_OK) { 332 return 0; 333 } 334 335 switch (pde & PTE_ENTRYTYPE_MASK) { 336 default: 337 case 0: /* Invalid */ 338 case 1: /* PDE, should not happen */ 339 case 3: /* Reserved */ 340 return 0; 341 case 2: /* L3 PTE */ 342 return pde; 343 } 344 } 345 } 346 } 347 return 0; 348 } 349 350 void dump_mmu(CPUSPARCState *env) 351 { 352 CPUState *cs = env_cpu(env); 353 target_ulong va, va1, va2; 354 unsigned int n, m, o; 355 hwaddr pa; 356 uint32_t pde; 357 358 qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n", 359 (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); 360 for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { 361 pde = mmu_probe(env, va, 2); 362 if (pde) { 363 pa = cpu_get_phys_page_debug(cs, va); 364 qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx 365 " PDE: " TARGET_FMT_lx "\n", va, pa, pde); 366 for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { 367 pde = mmu_probe(env, va1, 1); 368 if (pde) { 369 pa = cpu_get_phys_page_debug(cs, va1); 370 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 371 HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n", 372 va1, pa, pde); 373 for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { 374 pde = mmu_probe(env, va2, 0); 375 if (pde) { 376 pa = cpu_get_phys_page_debug(cs, va2); 377 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 378 HWADDR_FMT_plx " PTE: " 379 TARGET_FMT_lx "\n", 380 va2, pa, pde); 381 } 382 } 383 } 384 } 385 } 386 } 387 } 388 389 /* Gdb expects all registers windows to be flushed in ram. This function handles 390 * reads (and only reads) in stack frames as if windows were flushed. We assume 391 * that the sparc ABI is followed. 392 */ 393 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, 394 uint8_t *buf, size_t len, bool is_write) 395 { 396 CPUSPARCState *env = cpu_env(cs); 397 target_ulong addr = address; 398 int i; 399 int len1; 400 int cwp = env->cwp; 401 402 if (!is_write) { 403 for (i = 0; i < env->nwindows; i++) { 404 int off; 405 target_ulong fp = env->regbase[cwp * 16 + 22]; 406 407 /* Assume fp == 0 means end of frame. */ 408 if (fp == 0) { 409 break; 410 } 411 412 cwp = cpu_cwp_inc(env, cwp + 1); 413 414 /* Invalid window ? */ 415 if (env->wim & (1 << cwp)) { 416 break; 417 } 418 419 /* According to the ABI, the stack is growing downward. */ 420 if (addr + len < fp) { 421 break; 422 } 423 424 /* Not in this frame. */ 425 if (addr > fp + 64) { 426 continue; 427 } 428 429 /* Handle access before this window. */ 430 if (addr < fp) { 431 len1 = fp - addr; 432 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { 433 return -1; 434 } 435 addr += len1; 436 len -= len1; 437 buf += len1; 438 } 439 440 /* Access byte per byte to registers. Not very efficient but speed 441 * is not critical. 442 */ 443 off = addr - fp; 444 len1 = 64 - off; 445 446 if (len1 > len) { 447 len1 = len; 448 } 449 450 for (; len1; len1--) { 451 int reg = cwp * 16 + 8 + (off >> 2); 452 union { 453 uint32_t v; 454 uint8_t c[4]; 455 } u; 456 u.v = cpu_to_be32(env->regbase[reg]); 457 *buf++ = u.c[off & 3]; 458 addr++; 459 len--; 460 off++; 461 } 462 463 if (len == 0) { 464 return 0; 465 } 466 } 467 } 468 return cpu_memory_rw_debug(cs, addr, buf, len, is_write); 469 } 470 471 #else /* !TARGET_SPARC64 */ 472 473 /* 41 bit physical address space */ 474 static inline hwaddr ultrasparc_truncate_physical(uint64_t x) 475 { 476 return x & 0x1ffffffffffULL; 477 } 478 479 /* 480 * UltraSparc IIi I/DMMUs 481 */ 482 483 /* Returns true if TTE tag is valid and matches virtual address value 484 in context requires virtual address mask value calculated from TTE 485 entry size */ 486 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, 487 uint64_t address, uint64_t context, 488 hwaddr *physical) 489 { 490 uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); 491 492 /* valid, context match, virtual address match? */ 493 if (TTE_IS_VALID(tlb->tte) && 494 (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) 495 && compare_masked(address, tlb->tag, mask)) { 496 /* decode physical address */ 497 *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; 498 return 1; 499 } 500 501 return 0; 502 } 503 504 static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) 505 { 506 uint64_t sfsr = SFSR_VALID_BIT; 507 508 switch (mmu_idx) { 509 case MMU_PHYS_IDX: 510 sfsr |= SFSR_CT_NOTRANS; 511 break; 512 case MMU_USER_IDX: 513 case MMU_KERNEL_IDX: 514 sfsr |= SFSR_CT_PRIMARY; 515 break; 516 case MMU_USER_SECONDARY_IDX: 517 case MMU_KERNEL_SECONDARY_IDX: 518 sfsr |= SFSR_CT_SECONDARY; 519 break; 520 case MMU_NUCLEUS_IDX: 521 sfsr |= SFSR_CT_NUCLEUS; 522 break; 523 default: 524 g_assert_not_reached(); 525 } 526 527 if (rw == 1) { 528 sfsr |= SFSR_WRITE_BIT; 529 } else if (rw == 4) { 530 sfsr |= SFSR_NF_BIT; 531 } 532 533 if (env->pstate & PS_PRIV) { 534 sfsr |= SFSR_PR_BIT; 535 } 536 537 if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ 538 sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ 539 } 540 541 /* FIXME: ASI field in SFSR must be set */ 542 543 return sfsr; 544 } 545 546 static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full, 547 target_ulong address, int rw, int mmu_idx) 548 { 549 CPUState *cs = env_cpu(env); 550 unsigned int i; 551 uint64_t sfsr; 552 uint64_t context; 553 bool is_user = false; 554 555 sfsr = build_sfsr(env, mmu_idx, rw); 556 557 switch (mmu_idx) { 558 case MMU_PHYS_IDX: 559 g_assert_not_reached(); 560 case MMU_USER_IDX: 561 is_user = true; 562 /* fallthru */ 563 case MMU_KERNEL_IDX: 564 context = env->dmmu.mmu_primary_context & 0x1fff; 565 break; 566 case MMU_USER_SECONDARY_IDX: 567 is_user = true; 568 /* fallthru */ 569 case MMU_KERNEL_SECONDARY_IDX: 570 context = env->dmmu.mmu_secondary_context & 0x1fff; 571 break; 572 default: 573 context = 0; 574 break; 575 } 576 577 for (i = 0; i < 64; i++) { 578 /* ctx match, vaddr match, valid? */ 579 if (ultrasparc_tag_match(&env->dtlb[i], address, context, 580 &full->phys_addr)) { 581 int do_fault = 0; 582 583 if (TTE_IS_IE(env->dtlb[i].tte)) { 584 full->tlb_fill_flags |= TLB_BSWAP; 585 } 586 587 /* access ok? */ 588 /* multiple bits in SFSR.FT may be set on TT_DFAULT */ 589 if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { 590 do_fault = 1; 591 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ 592 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); 593 } 594 if (rw == 4) { 595 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { 596 do_fault = 1; 597 sfsr |= SFSR_FT_NF_E_BIT; 598 } 599 } else { 600 if (TTE_IS_NFO(env->dtlb[i].tte)) { 601 do_fault = 1; 602 sfsr |= SFSR_FT_NFO_BIT; 603 } 604 } 605 606 if (do_fault) { 607 /* faults above are reported with TT_DFAULT. */ 608 cs->exception_index = TT_DFAULT; 609 } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { 610 do_fault = 1; 611 cs->exception_index = TT_DPROT; 612 613 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); 614 } 615 616 if (!do_fault) { 617 full->prot = PAGE_READ; 618 if (TTE_IS_W_OK(env->dtlb[i].tte)) { 619 full->prot |= PAGE_WRITE; 620 } 621 622 TTE_SET_USED(env->dtlb[i].tte); 623 624 return 0; 625 } 626 627 env->dmmu.sfsr = sfsr; 628 env->dmmu.sfar = address; /* Fault address register */ 629 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 630 return 1; 631 } 632 } 633 634 trace_mmu_helper_dmiss(address, context); 635 636 /* 637 * On MMU misses: 638 * - UltraSPARC IIi: SFSR and SFAR unmodified 639 * - JPS1: SFAR updated and some fields of SFSR updated 640 */ 641 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 642 cs->exception_index = TT_DMISS; 643 return 1; 644 } 645 646 static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full, 647 target_ulong address, int mmu_idx) 648 { 649 CPUState *cs = env_cpu(env); 650 unsigned int i; 651 uint64_t context; 652 bool is_user = false; 653 654 switch (mmu_idx) { 655 case MMU_PHYS_IDX: 656 case MMU_USER_SECONDARY_IDX: 657 case MMU_KERNEL_SECONDARY_IDX: 658 g_assert_not_reached(); 659 case MMU_USER_IDX: 660 is_user = true; 661 /* fallthru */ 662 case MMU_KERNEL_IDX: 663 context = env->dmmu.mmu_primary_context & 0x1fff; 664 break; 665 default: 666 context = 0; 667 break; 668 } 669 670 if (env->tl == 0) { 671 /* PRIMARY context */ 672 context = env->dmmu.mmu_primary_context & 0x1fff; 673 } else { 674 /* NUCLEUS context */ 675 context = 0; 676 } 677 678 for (i = 0; i < 64; i++) { 679 /* ctx match, vaddr match, valid? */ 680 if (ultrasparc_tag_match(&env->itlb[i], 681 address, context, &full->phys_addr)) { 682 /* access ok? */ 683 if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { 684 /* Fault status register */ 685 if (env->immu.sfsr & SFSR_VALID_BIT) { 686 env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before 687 another fault) */ 688 } else { 689 env->immu.sfsr = 0; 690 } 691 if (env->pstate & PS_PRIV) { 692 env->immu.sfsr |= SFSR_PR_BIT; 693 } 694 if (env->tl > 0) { 695 env->immu.sfsr |= SFSR_CT_NUCLEUS; 696 } 697 698 /* FIXME: ASI field in SFSR must be set */ 699 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; 700 cs->exception_index = TT_TFAULT; 701 702 env->immu.tag_access = (address & ~0x1fffULL) | context; 703 704 trace_mmu_helper_tfault(address, context); 705 706 return 1; 707 } 708 full->prot = PAGE_EXEC; 709 TTE_SET_USED(env->itlb[i].tte); 710 return 0; 711 } 712 } 713 714 trace_mmu_helper_tmiss(address, context); 715 716 /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ 717 env->immu.tag_access = (address & ~0x1fffULL) | context; 718 cs->exception_index = TT_TMISS; 719 return 1; 720 } 721 722 static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, 723 int *access_index, target_ulong address, 724 int rw, int mmu_idx) 725 { 726 /* ??? We treat everything as a small page, then explicitly flush 727 everything when an entry is evicted. */ 728 full->lg_page_size = TARGET_PAGE_BITS; 729 730 /* safety net to catch wrong softmmu index use from dynamic code */ 731 if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { 732 if (rw == 2) { 733 trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, 734 env->dmmu.mmu_primary_context, 735 env->dmmu.mmu_secondary_context, 736 address); 737 } else { 738 trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, 739 env->dmmu.mmu_primary_context, 740 env->dmmu.mmu_secondary_context, 741 address); 742 } 743 } 744 745 if (mmu_idx == MMU_PHYS_IDX) { 746 full->phys_addr = ultrasparc_truncate_physical(address); 747 full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 748 return 0; 749 } 750 751 if (rw == 2) { 752 return get_physical_address_code(env, full, address, mmu_idx); 753 } else { 754 return get_physical_address_data(env, full, address, rw, mmu_idx); 755 } 756 } 757 758 /* Perform address translation */ 759 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 760 MMUAccessType access_type, int mmu_idx, 761 bool probe, uintptr_t retaddr) 762 { 763 CPUSPARCState *env = cpu_env(cs); 764 CPUTLBEntryFull full = {}; 765 int error_code = 0, access_index; 766 767 address &= TARGET_PAGE_MASK; 768 error_code = get_physical_address(env, &full, &access_index, 769 address, access_type, mmu_idx); 770 if (likely(error_code == 0)) { 771 trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl, 772 env->dmmu.mmu_primary_context, 773 env->dmmu.mmu_secondary_context); 774 tlb_set_page_full(cs, mmu_idx, address, &full); 775 return true; 776 } 777 if (probe) { 778 return false; 779 } 780 cpu_loop_exit_restore(cs, retaddr); 781 } 782 783 void dump_mmu(CPUSPARCState *env) 784 { 785 unsigned int i; 786 const char *mask; 787 788 qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %" 789 PRId64 "\n", 790 env->dmmu.mmu_primary_context, 791 env->dmmu.mmu_secondary_context); 792 qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64 793 "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target); 794 if ((env->lsu & DMMU_E) == 0) { 795 qemu_printf("DMMU disabled\n"); 796 } else { 797 qemu_printf("DMMU dump\n"); 798 for (i = 0; i < 64; i++) { 799 switch (TTE_PGSIZE(env->dtlb[i].tte)) { 800 default: 801 case 0x0: 802 mask = " 8k"; 803 break; 804 case 0x1: 805 mask = " 64k"; 806 break; 807 case 0x2: 808 mask = "512k"; 809 break; 810 case 0x3: 811 mask = " 4M"; 812 break; 813 } 814 if (TTE_IS_VALID(env->dtlb[i].tte)) { 815 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 816 ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", 817 i, 818 env->dtlb[i].tag & (uint64_t)~0x1fffULL, 819 TTE_PA(env->dtlb[i].tte), 820 mask, 821 TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", 822 TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", 823 TTE_IS_LOCKED(env->dtlb[i].tte) ? 824 "locked" : "unlocked", 825 TTE_IS_IE(env->dtlb[i].tte) ? 826 "yes" : "no", 827 env->dtlb[i].tag & (uint64_t)0x1fffULL, 828 TTE_IS_GLOBAL(env->dtlb[i].tte) ? 829 "global" : "local"); 830 } 831 } 832 } 833 if ((env->lsu & IMMU_E) == 0) { 834 qemu_printf("IMMU disabled\n"); 835 } else { 836 qemu_printf("IMMU dump\n"); 837 for (i = 0; i < 64; i++) { 838 switch (TTE_PGSIZE(env->itlb[i].tte)) { 839 default: 840 case 0x0: 841 mask = " 8k"; 842 break; 843 case 0x1: 844 mask = " 64k"; 845 break; 846 case 0x2: 847 mask = "512k"; 848 break; 849 case 0x3: 850 mask = " 4M"; 851 break; 852 } 853 if (TTE_IS_VALID(env->itlb[i].tte)) { 854 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 855 ", %s, %s, %s, ctx %" PRId64 " %s\n", 856 i, 857 env->itlb[i].tag & (uint64_t)~0x1fffULL, 858 TTE_PA(env->itlb[i].tte), 859 mask, 860 TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", 861 TTE_IS_LOCKED(env->itlb[i].tte) ? 862 "locked" : "unlocked", 863 env->itlb[i].tag & (uint64_t)0x1fffULL, 864 TTE_IS_GLOBAL(env->itlb[i].tte) ? 865 "global" : "local"); 866 } 867 } 868 } 869 } 870 871 #endif /* TARGET_SPARC64 */ 872 873 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, 874 target_ulong addr, int rw, int mmu_idx) 875 { 876 CPUTLBEntryFull full = {}; 877 int access_index, ret; 878 879 ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx); 880 if (ret == 0) { 881 *phys = full.phys_addr; 882 } 883 return ret; 884 } 885 886 #if defined(TARGET_SPARC64) 887 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, 888 int mmu_idx) 889 { 890 hwaddr phys_addr; 891 892 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { 893 return -1; 894 } 895 return phys_addr; 896 } 897 #endif 898 899 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 900 { 901 CPUSPARCState *env = cpu_env(cs); 902 hwaddr phys_addr; 903 int mmu_idx = cpu_mmu_index(cs, false); 904 905 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { 906 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { 907 return -1; 908 } 909 } 910 return phys_addr; 911 } 912 913 G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 914 MMUAccessType access_type, 915 int mmu_idx, 916 uintptr_t retaddr) 917 { 918 CPUSPARCState *env = cpu_env(cs); 919 920 #ifdef TARGET_SPARC64 921 env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); 922 env->dmmu.sfar = addr; 923 #else 924 env->mmuregs[4] = addr; 925 #endif 926 927 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); 928 } 929