/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */

/* Context used internally during MMU translations */
typedef struct {
    hwaddr raddr;      /* Real address */
    hwaddr eaddr;      /* Effective address */
    int prot;          /* Protection bits */
    hwaddr hash[2];    /* Pagetable hash values */
    target_ulong ptem; /* Virtual segment ID | API */
    int key;           /* Access key */
    int nx;            /* Non-execute area */
} mmu_ctx_t;

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ? 0 : -2;
}

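/*
 * Compute the index of a 6xx software TLB entry: the entry within a way is
 * selected by the low bits of the effective page number, ways are laid out
 * back to back, and instruction entries follow the data entries when the
 * CPU is configured with split I/D TLBs.
 */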
int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}

/* Software driven TLB helpers */

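/*
 * Look up an effective address in the 6xx software TLB: every way is probed,
 * the matching PTE's translation and access rights are recorded in *ctx, and
 * the Referenced/Changed bits of the winning entry are updated.
 */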
static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1: /* No match */
        case -3: /* TLB inconsistency */
        default:
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should go on looping to check all TLBs
             *      consistency but we can speed-up the whole thing as
             *      the result would be undefined if TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }
#if defined(DUMP_PAGE_TABLES)
    if (qemu_loglevel_mask(CPU_LOG_MMU)) {
        CPUState *cs = env_cpu(env);
        hwaddr base = ppc_hash32_hpt_base(env_archcpu(env));
        hwaddr len = ppc_hash32_hpt_mask(env_archcpu(env)) + 0x80;
        uint32_t a0, a1, a2, a3;

        qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx "\n",
                 base, len);
        for (hwaddr curaddr = base; curaddr < base + len; curaddr += 16) {
            a0 = ldl_phys(cs->as, curaddr);
            a1 = ldl_phys(cs->as, curaddr + 4);
            a2 = ldl_phys(cs->as, curaddr + 8);
            a3 = ldl_phys(cs->as, curaddr + 12);
            if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                         curaddr, a0, a1, a2, a3);
            }
        }
    }
#endif
    return ret;
}

/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

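/*
 * Scan the IBAT or DBAT array for a block that covers the effective address;
 * on a hit, the real address and access rights derived from the BAT pair are
 * stored in *ctx.
 */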
static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx
                              " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx
                              "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " "
                              TARGET_FMT_lx "\n", __func__, ifetch ? 'I' : 'D',
                              i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

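/*
 * Full 6xx address translation for one access: try the BATs first, then fall
 * back to segment translation through the hashed page table (searched via
 * the software TLB), or handle the direct-store (T=1) special cases.
 */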
static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong eaddr,
                                       MMUAccessType access_type, int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid, sr, pgidx;
    int ds, target_page_bits;
    bool pr;

    /* First try to find a BAT entry if there are any */
    if (env->nb_BATs && get_bat_6xx_tlb(env, ctx, eaddr, access_type) == 0) {
        return 0;
    }

    /* Perform segment based translation when no BATs matched */
    pr = FIELD_EX64(env->msr, MSR, PR);
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU, "pte segment: key=%d ds %d nx %d vsid "
                  TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid);
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type == ACCESS_CODE && ctx->nx) {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            return -3;
        }
        /* Page address translation */
        qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx " htab_mask "
                      HWADDR_FMT_plx " hash " HWADDR_FMT_plx "\n",
                      ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
        ctx->hash[0] = hash;
        ctx->hash[1] = ~hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        /* Software TLB search */
        return ppc6xx_tlb_check(env, ctx, eaddr, access_type);
    }

    /* Direct-store segment : absolutely *BUGGY* for now */
    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
    switch (type) {
    case ACCESS_INT:
        /* Integer load/store : only access allowed */
        break;
    case ACCESS_CODE:
        /* No code fetch is allowed in direct-store areas */
        return -4;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        return -4;
    case ACCESS_RES:
        /* lwarx, ldarx or stwcx. */
        return -4;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction do a no-op. As it already does a
         * no-op, it's quite easy :-)
         */
        ctx->raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        return -4;
    default:
        qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need address"
                      " translation\n");
        return -4;
    }
    if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
        (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
        ctx->raddr = eaddr;
        return 2;
    }
    return -2;
}

/* Generic TLB check function for embedded PowerPC implementations */
static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                             hwaddr *raddrp,
                             target_ulong address, uint32_t pid, int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return false;
    }
    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return false;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return false;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    return true;
}

/* Generic TLB search function for PowerPC embedded implementations */
int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) {
            return i;
        }
    }
    return -1;
}

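/*
 * 40x software TLB lookup: find an entry matching the current PID and apply
 * the zone protection register (ZPR) rules to derive the access rights.
 */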
static int mmu40x_get_physical_address(CPUPPCState *env, hwaddr *raddr,
                                       int *prot, target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    pr = FIELD_EX64(env->msr, MSR, PR);
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (!ppcemb_tlb_check(env, tlb, raddr, address,
                              env->spr[SPR_40x_PID], i)) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            *prot = PAGE_RWX;
            ret = 0;
            break;

        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                *prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
check_perms:
            /* Check from TLB entry */
            *prot = tlb->prot;
            ret = check_prot(*prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: access %s " TARGET_FMT_lx " => "
                  HWADDR_FMT_plx " %d %d\n", __func__,
                  ret < 0 ? "refused" : "granted", address,
                  ret < 0 ? 0 : *raddr, *prot, ret);

    return ret;
}

static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb,
                               hwaddr *raddr, target_ulong addr, int i)
{
    if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) {
        if (!env->nb_pids) {
            /* Extend the physical address to 36 bits */
            *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32;
        }
        return true;
    } else if (!env->nb_pids) {
        return false;
    }
    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) {
        return true;
    }
    return false;
}

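/*
 * Check one BookE TLB entry: the PID must match one of PID0/PID1/PID2, the
 * entry's address-space bit must match MSR[IR] or MSR[DR], and the entry
 * must grant the requested user or supervisor permission.
 */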
static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    if (!mmubooke_check_pid(env, tlb, raddr, address, i)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
        return -1;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ?
        FIELD_EX64(env->msr, MSR, IR) :
        FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    if (FIELD_EX64(env->msr, MSR, PR)) {
        *prot = tlb->prot & 0xF;
    } else {
        *prot = (tlb->prot >> 4) & 0xF;
    }
    if (*prot & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke_get_physical_address(CPUPPCState *env, hwaddr *raddr,
                                         int *prot, target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    int i, ret = -1;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, raddr, prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: access %s " TARGET_FMT_lx " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, ret < 0 ? "refused" : "granted",
                  address, ret < 0 ? -1 : *raddr, ret == -1 ? 0 : *prot, ret);
    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
                     target_ulong address, uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

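/*
 * Accesses made through the EPID MMU indexes are translated with the
 * external-PID context taken from EPLC/EPSC instead of the current MSR,
 * and mmubooke206_esr() flags them in the ESR.
 */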
static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get the EPID register for the given mmu_idx. If this is a regular access,
 * construct the EPID access bits from the current processor state instead.
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID. Return true if a valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = FIELD_EX64(env->msr, MSR, DS);
        *pr_out = FIELD_EX64(env->msr, MSR, PR);
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
                  "0x" TARGET_FMT_lx "\n", __func__, address);
    return -1;

found_tlb:

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = FIELD_EX64(env->msr, MSR, IR);
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = 0;
    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            *prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            *prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            *prot |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            *prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            *prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            *prot |= PAGE_EXEC;
        }
    }
    if (*prot & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

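/*
 * BookE 2.06 (MAS-style) lookup: for each TLB array, check every way of the
 * set selected by the effective address instead of scanning linearly.
 */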
static int mmubooke206_get_physical_address(CPUPPCState *env, hwaddr *raddr,
                                            int *prot, target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    int i, j, ret = -1;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);
        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, raddr, prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    qemu_log_mask(CPU_LOG_MMU, "%s: access %s " TARGET_FMT_lx " => "
                  HWADDR_FMT_plx " %d %d\n", __func__,
                  ret < 0 ? "refused" : "granted", address,
                  ret < 0 ? -1 : *raddr, ret == -1 ? 0 : *prot, ret);
    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective          Physical           Size PID   Prot     "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }

}

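/* Print the valid entries of one BookE 2.06 TLB array as a formatted table */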
static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective          Physical           Size TID   TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separate TLBs"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = FIELD_EX64(env->msr, MSR, IR);
    }
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static bool ppc_booke_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    hwaddr raddr;
    int prot, ret;

    if (env->mmu_model == POWERPC_MMU_BOOKE206) {
        ret = mmubooke206_get_physical_address(env, &raddr, &prot, eaddr,
                                               access_type, mmu_idx);
    } else {
        ret = mmubooke_get_physical_address(env, &raddr, &prot, eaddr,
                                            access_type);
    }
    if (ret == 0) {
        *raddrp = raddr;
        *protp = prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (!guest_visible) {
        return false;
    }

    log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
    env->error_code = 0;
    switch (ret) {
    case -1:
        /* No matches in page tables or TLB */
        if (env->mmu_model == POWERPC_MMU_BOOKE206) {
            booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
        }
        cs->exception_index = (access_type == MMU_INST_FETCH) ?
                              POWERPC_EXCP_ITLB : POWERPC_EXCP_DTLB;
        env->spr[SPR_BOOKE_DEAR] = eaddr;
        env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
        break;
    case -2:
        /* Access rights violation */
        cs->exception_index = (access_type == MMU_INST_FETCH) ?
                              POWERPC_EXCP_ISI : POWERPC_EXCP_DSI;
        if (access_type != MMU_INST_FETCH) {
            env->spr[SPR_BOOKE_DEAR] = eaddr;
            env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
        }
        break;
    case -3:
        /* No execute protection violation */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->spr[SPR_BOOKE_ESR] = 0;
        break;
    }

    return false;
}

static bool ppc_real_mode_xlate(PowerPCCPU *cpu, vaddr eaddr,
                                MMUAccessType access_type,
                                hwaddr *raddrp, int *psizep, int *protp)
{
    CPUPPCState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH ? !FIELD_EX64(env->msr, MSR, IR)
                                      : !FIELD_EX64(env->msr, MSR, DR)) {
        *raddrp = eaddr;
        *protp = PAGE_RWX;
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (env->mmu_model == POWERPC_MMU_REAL) {
        cpu_abort(CPU(cpu), "PowerPC in real mode should not do translation\n");
    }
    return false;
}

static bool ppc_40x_xlate(PowerPCCPU *cpu, vaddr eaddr,
                          MMUAccessType access_type,
                          hwaddr *raddrp, int *psizep, int *protp,
                          int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int ret;

    if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) {
        return true;
    }

    ret = mmu40x_get_physical_address(env, raddrp, protp, eaddr, access_type);
    if (ret == 0) {
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (!guest_visible) {
        return false;
    }

    log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
    if (access_type == MMU_INST_FETCH) {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_ITLB;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            env->spr[SPR_40x_ESR] = 0x00000000;
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_DTLB;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_40x_ESR] = 0x00800000;
            } else {
                env->spr[SPR_40x_ESR] = 0x00000000;
            }
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_40x_ESR] |= 0x00800000;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
    return false;
}

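/*
 * 6xx translation entry point: map mmu6xx_get_physical_address() failures to
 * the software TLB reload exceptions (IFTLB/DLTLB/DSTLB with IMISS/DMISS,
 * ICMP/DCMP and HASH1/HASH2 primed for the handler) or to ISI/DSI faults.
 */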
static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
                          MMUAccessType access_type,
                          hwaddr *raddrp, int *psizep, int *protp,
                          int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) {
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ctx.prot = 0;
    ctx.hash[0] = 0;
    ctx.hash[1] = 0;
    ret = mmu6xx_get_physical_address(env, &ctx, eaddr, access_type, type);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (!guest_visible) {
        return false;
    }

    log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
    if (type == ACCESS_CODE) {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_IFTLB;
            env->error_code = 1 << 18;
            env->spr[SPR_IMISS] = eaddr;
            env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
            goto tlb_miss;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
            break;
        case -3:
            /* No execute protection violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
            break;
        case -4:
            /* Direct store exception */
            /* No code fetch is allowed in direct-store areas */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
            break;
        }
    } else {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            if (access_type == MMU_DATA_STORE) {
                cs->exception_index = POWERPC_EXCP_DSTLB;
                env->error_code = 1 << 16;
            } else {
                cs->exception_index = POWERPC_EXCP_DLTLB;
                env->error_code = 0;
            }
            env->spr[SPR_DMISS] = eaddr;
            env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
tlb_miss:
            env->error_code |= ctx.key << 19;
            env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                                  get_pteg_offset32(cpu, ctx.hash[0]);
            env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                                  get_pteg_offset32(cpu, ctx.hash[1]);
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x0A000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
            break;
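        /*
         * Direct-store (T=1) segment accesses cannot be translated here;
         * the fault raised depends on the kind of access attempted.
         */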
        case -4:
            /* Direct store exception */
            switch (type) {
            case ACCESS_FLOAT:
                /* Floating point load/store */
                cs->exception_index = POWERPC_EXCP_ALIGN;
                env->error_code = POWERPC_EXCP_ALIGN_FP;
                env->spr[SPR_DAR] = eaddr;
                break;
            case ACCESS_RES:
                /* lwarx, ldarx or stwcx. */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x06000000;
                } else {
                    env->spr[SPR_DSISR] = 0x04000000;
                }
                break;
            case ACCESS_EXT:
                /* eciwx or ecowx */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x06100000;
                } else {
                    env->spr[SPR_DSISR] = 0x04100000;
                }
                break;
            default:
                printf("DSI: invalid exception (%d)\n", ret);
                cs->exception_index = POWERPC_EXCP_PROGRAM;
                env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                env->spr[SPR_DAR] = eaddr;
                break;
            }
            break;
        }
    }
    return false;
}

/*****************************************************************************/

bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        return ppc_booke_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_SOFT_4xx:
        return ppc_40x_xlate(cpu, eaddr, access_type, raddrp,
                             psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_SOFT_6xx:
        return ppc_6xx_xlate(cpu, eaddr, access_type, raddrp,
                             psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_REAL:
        return ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep,
                                   protp);
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env_cpu(&cpu->env), "MPC8xx MMU model is not implemented\n");
    default:
        cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n");
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}