/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "qemu/qemu-print.h"
#include "trace.h"

/* Sparc MMU emulation */

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};
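
/*
 * Note on the tables above: access_table is indexed as
 * access_table[access_index][PTE.ACC], where access_index packs the
 * access into three bits (bit 2: write, bit 1: instruction fetch,
 * bit 0: supervisor; see get_physical_address() below).  The nonzero
 * entries are SRMMU fault types pre-shifted into the FSR.FT position
 * (bits 4:2): per the SPARC V8 reference MMU, 8 is FT=2 (protection
 * error) and 12 is FT=3 (privilege violation).  perm_table[is_user]
 * [PTE.ACC] gives the page protection granted for the same ACC field.
 */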

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);
    MemTxResult result;

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        full->lg_page_size = TARGET_PAGE_BITS;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            full->phys_addr = env->prom_addr | (address & 0x7ffffULL);
            full->prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        full->phys_addr = address;
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    full->phys_addr = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 4 << 2; /* Translation fault, L = 0 */
    }

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                full->lg_page_size = TARGET_PAGE_BITS;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                full->lg_page_size = 18;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            full->lg_page_size = 24;
            break;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    full->prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /*
         * Only set write access if the page is already dirty;
         * otherwise wait for the dirty fault.
         */
        full->prot &= ~PAGE_WRITE;
    }

    /*
     * Even with large PTEs, we map only one 4KB page into the TLB
     * to avoid filling it too quickly.
     */
    full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}
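
/*
 * The fault bookkeeping below follows the SPARC V8 fault status
 * register layout: OW (overwrite) is bit 0, FAV (fault address valid)
 * bit 1, FT (fault type) bits 4:2, AT (access type) bits 7:5 and
 * L (level) bits 9:8.  get_physical_address() already returns the
 * error code with FT and L in place, so sparc_cpu_tlb_fill() only
 * has to merge in AT (the access_index) and set FAV.
 */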

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    target_ulong vaddr;
    int error_code = 0, access_index;

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set.  In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, full.phys_addr, vaddr);
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    }

    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /*
         * No fault mode: if a mapping is available, just override
         * permissions.  If no mapping is available, redirect accesses
         * to neverland.  Fake/overridden mappings will be flushed when
         * switching to normal mode.
         */
        full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}
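
/*
 * Walk the page tables as the SRMMU probe ASI would.  The mmulev
 * argument selects how deep the walk goes before the entry is
 * returned: 3 stops at the context table entry, 2 at the level-1
 * entry, 1 at the level-2 entry, and 0 walks all the way down to
 * the level-3 PTE.  Any bus error or invalid/reserved entry on the
 * way down yields 0.
 */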
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;
    MemTxResult result;

    /*
     * TODO: MMU probe operations are supposed to set the fault
     * status registers, but we don't do this.
     */

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 0;
    }

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return 0;
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return 0;
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return 0;
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pa;
    uint32_t pde;

    qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        HWADDR_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}
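
/*
 * Frame layout assumed below (standard SPARC ABI): each stack frame
 * starts with a 64-byte save area at %sp holding the 8 local and 8 in
 * registers of the window that owns the frame.  The frame pointer of
 * window N (%i6, regbase[cwp * 16 + 22]) is the stack pointer of
 * window N + 1, so after cpu_cwp_inc() the 16 registers starting at
 * %l0 (regbase[cwp * 16 + 8]) shadow the bytes at fp..fp + 63.
 */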
/*
 * GDB expects all register windows to be flushed to RAM.  This function
 * handles reads (and only reads) in stack frames as if the windows were
 * flushed.  We assume that the SPARC ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, size_t len, bool is_write)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack grows downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /*
             * Access the registers byte by byte.  Not very efficient,
             * but speed is not critical here.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/*
 * Returns true if the TTE tag is valid and matches the virtual address
 * in the given context; the comparison mask is derived from the page
 * size encoded in the TTE.
 */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}
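
/*
 * A worked example of the mask above: TTE_PGSIZE() is the 2-bit page
 * size field, so a page spans 8K << (3 * size) bytes -- 8K, 64K, 512K
 * or 4M.  For a 64K page (size 1), 8192ULL << 3 is 0x10000 and
 * mask = -(0x10000) = 0xffffffffffff0000: everything above the 16-bit
 * page offset participates in the tag comparison, while the low bits
 * of the physical address are taken from the virtual address.
 */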
static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
{
    uint64_t sfsr = SFSR_VALID_BIT;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        sfsr |= SFSR_CT_NOTRANS;
        break;
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        break;
    default:
        g_assert_not_reached();
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    if (env->pstate & PS_PRIV) {
        sfsr |= SFSR_PR_BIT;
    }

    if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
        sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
    }

    /* FIXME: ASI field in SFSR must be set */

    return sfsr;
}

static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t sfsr;
    uint64_t context;
    bool is_user = false;

    sfsr = build_sfsr(env, mmu_idx, rw);

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context,
                                 &full->phys_addr)) {
            int do_fault = 0;

            if (TTE_IS_IE(env->dtlb[i].tte)) {
                full->tlb_fill_flags |= TLB_BSWAP;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                full->prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    full->prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            env->dmmu.sfsr = sfsr;
            env->dmmu.sfar = address; /* Fault address register */
            env->dmmu.tag_access = (address & ~0x1fffULL) | context;
            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}
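
/*
 * Instruction-side lookup.  Note two asymmetries with the data path
 * above: the ITLB only ever grants PAGE_EXEC, and on a fault only
 * SFSR and the tag access register are updated -- no fault address
 * register is written, since the faulting address is the PC that the
 * trap already delivers.
 */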
static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, &full->phys_addr)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            full->prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in the DMMU (dmmuregs[1]) even for the IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    /*
     * ??? We treat everything as a small page, then explicitly flush
     * everything when an entry is evicted.
     */
    full->lg_page_size = TARGET_PAGE_BITS;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        full->phys_addr = ultrasparc_truncate_physical(address);
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, full, address, mmu_idx);
    } else {
        return get_physical_address_data(env, full, address, rw, mmu_idx);
    }
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    int error_code = 0, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    if (likely(error_code == 0)) {
        trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);
        tlb_set_page_full(cs, mmu_idx, address, &full);
        return true;
    }
    if (probe) {
        return false;
    }
    cpu_loop_exit_restore(cs, retaddr);
}
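
/*
 * Dump both TLBs.  The two-bit TTE page-size field decodes to
 * 8K/64K/512K/4M, matching the masks computed in
 * ultrasparc_tag_match() above.
 */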
void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    CPUTLBEntryFull full = {};
    int access_index, ret;

    ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx);
    if (ret == 0) {
        *phys = full.phys_addr;
    }
    return ret;
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    /* rw == 4 requests a non-faulting (no-fault ASI) translation */
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPUSPARCState *env = cpu_env(cs);
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(cs, false);

    /* Try a code translation (rw == 2) first, then fall back to data. */
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}

G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx,
                                              uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);

#ifdef TARGET_SPARC64
    env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
    env->dmmu.sfar = addr;
#else
    env->mmuregs[4] = addr;
#endif

    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}