/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "mmu-booke.h"

/* #define DUMP_PAGE_TABLES */

/* Context used internally during MMU translations */
typedef struct {
    hwaddr raddr;      /* Real address             */
    int prot;          /* Protection bits          */
    target_ulong ptem; /* Virtual segment ID | API */
    int key;           /* Access key               */
} mmu_ctx_t;

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx has separate TLBs for instructions and data */
    if (is_code) {
        nr += env->nb_tlb;
    }

    return nr;
}

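/*
 * Check one PTE against the current translation context.  Returns 0 when
 * the entry matches and the access is allowed, -1 when it does not match,
 * -2 on an access rights violation and -3 when the entry disagrees with a
 * previously matched one (bad RPN/WIMG/PP).
 */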
static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int pteh,
                                MMUAccessType access_type, bool nx)
{
    /* Check validity and table match */
    if (!pte_is_valid(pte0) || ((pte0 >> 6) & 1) != pteh ||
        (pte0 & PTE_PTEM_MASK) != ctx->ptem) {
        return -1;
    }
    /* all matches should have equal RPN, WIMG & PP */
    if (ctx->raddr != (hwaddr)-1ULL &&
        (ctx->raddr & PTE_CHECK_MASK) != (pte1 & PTE_CHECK_MASK)) {
        qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
        return -3;
    }
    /* Keep the matching PTE information */
    ctx->raddr = pte1;
    ctx->prot = ppc_hash32_prot(ctx->key, pte1 & HPTE32_R_PP, nx);
    if (check_prot_access_type(ctx->prot, access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
        return 0;
    } else {
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        return -2;
    }
}

/* Software driven TLB helpers */

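/*
 * Scan every way of the software TLB for a translation of "eaddr".
 * Returns 0 on success, -1 when no entry matches and -2 on an access
 * rights violation, and updates the referenced/changed bits of the
 * matching entry as a side effect.
 */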
static int ppc6xx_tlb_check(CPUPPCState *env,
                            mmu_ctx_t *ctx, target_ulong eaddr,
                            MMUAccessType access_type, bool nx)
{
    ppc6xx_tlb_t *tlb;
    target_ulong *pte1p;
    int nr, best, way, ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type, nx)) {
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1: /* No match */
        case -3: /* TLB inconsistency */
        default:
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should keep looping to check the consistency of
             * all TLBs, but we can speed the whole thing up since the
             * result would be undefined if the TLBs are inconsistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
 done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte1p = &env->tlb.tlb6[best].pte1;
        *pte1p |= 0x00000100; /* Update accessed flag */
        if (!(*pte1p & 0x00000080)) {
            if (access_type == MMU_DATA_STORE && ret == 0) {
                /* Update changed flag */
                *pte1p |= 0x00000080;
            } else {
                /* Force page fault for first write access */
                ctx->prot &= ~PAGE_WRITE;
            }
        }
    }
#if defined(DUMP_PAGE_TABLES)
    if (qemu_loglevel_mask(CPU_LOG_MMU)) {
        CPUState *cs = env_cpu(env);
        hwaddr base = ppc_hash32_hpt_base(env_archcpu(env));
        hwaddr len = ppc_hash32_hpt_mask(env_archcpu(env)) + 0x80;
        uint32_t a0, a1, a2, a3;

        qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx "\n",
                 base, len);
        for (hwaddr curaddr = base; curaddr < base + len; curaddr += 16) {
            a0 = ldl_phys(cs->as, curaddr);
            a1 = ldl_phys(cs->as, curaddr + 4);
            a2 = ldl_phys(cs->as, curaddr + 8);
            a3 = ldl_phys(cs->as, curaddr + 12);
            if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                         curaddr, a0, a1, a2, a3);
            }
        }
    }
#endif
    return ret;
}

/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

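/*
 * Look up "virtual" in the instruction or data BATs.  Returns 0 on a hit
 * with sufficient access rights, -2 when a valid BAT matches but the
 * access is not permitted, and -1 when no valid BAT matches.
 */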
static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                if (check_prot_access_type(ctx->prot, access_type)) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ? 'W' : '-');
                    ret = 0;
                } else {
                    ret = -2;
                }
                break;
            }
        }
    }
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx
                              " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx
                              "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " "
                              TARGET_FMT_lx "\n", __func__, ifetch ? 'I' : 'D',
                              i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

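/*
 * Full 6xx MMU translation: try a BAT hit first, then do the segment
 * lookup followed by either a software TLB search or direct-store
 * segment handling.
 */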
static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong eaddr, hwaddr *hashp,
                                       MMUAccessType access_type, int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid, sr, pgidx;
    bool pr, ds, nx;

    /* First try to find a BAT entry if there are any */
    if (env->nb_BATs && get_bat_6xx_tlb(env, ctx, eaddr, access_type) == 0) {
        return 0;
    }

    /* Perform segment based translation when no BATs matched */
    pr = FIELD_EX64(env->msr, MSR, PR);

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & SR32_T;
    nx = sr & SR32_NX;
    vsid = sr & SR32_VSID;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU, "pte segment: key=%d ds %d nx %d vsid "
                  TARGET_FMT_lx "\n", ctx->key, ds, nx, vsid);
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type == ACCESS_CODE && nx) {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            return -3;
        }
        /* Page address translation */
        qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx " htab_mask "
                      HWADDR_FMT_plx " hash " HWADDR_FMT_plx "\n",
                      ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
        *hashp = hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        /* Software TLB search */
        return ppc6xx_tlb_check(env, ctx, eaddr, access_type, nx);
    }

    /* Direct-store segment: absolutely *BUGGY* for now */
    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
    switch (type) {
    case ACCESS_INT:
        /* Integer load/store: only access allowed */
        break;
    case ACCESS_CODE:
        /* No code fetch is allowed in direct-store areas */
        return -4;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        return -4;
    case ACCESS_RES:
        /* lwarx, ldarx or stwcx. */
        return -4;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction do no-op.  As it already does a
         * no-op, it's quite easy :-)
         */
        ctx->raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        return -4;
    default:
        qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need address"
                                   " translation\n");
        return -4;
    }
    if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
        (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
        ctx->raddr = eaddr;
        return 2;
    }
    return -2;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective          Physical           Size PID   Prot     "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }

}

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective          Physical           Size TID   TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    qemu_printf("\nTLBs                       [EPN    EPN + SIZE]\n");
    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}


static bool ppc_real_mode_xlate(PowerPCCPU *cpu, vaddr eaddr,
                                MMUAccessType access_type,
                                hwaddr *raddrp, int *psizep, int *protp)
{
    CPUPPCState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH ? !FIELD_EX64(env->msr, MSR, IR)
                                      : !FIELD_EX64(env->msr, MSR, DR)) {
        *raddrp = eaddr;
        *protp = PAGE_RWX;
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (env->mmu_model == POWERPC_MMU_REAL) {
        cpu_abort(CPU(cpu), "PowerPC in real mode should not do translation\n");
    }
    return false;
}

static bool ppc_40x_xlate(PowerPCCPU *cpu, vaddr eaddr,
                          MMUAccessType access_type,
                          hwaddr *raddrp, int *psizep, int *protp,
                          int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int ret;

    if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) {
        return true;
    }

    ret = mmu40x_get_physical_address(env, raddrp, protp, eaddr, access_type);
    if (ret == 0) {
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (!guest_visible) {
        return false;
    }

    log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
    if (access_type == MMU_INST_FETCH) {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_ITLB;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            env->spr[SPR_40x_ESR] = 0x00000000;
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_DTLB;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_40x_ESR] = 0x00800000;
            } else {
                env->spr[SPR_40x_ESR] = 0x00000000;
            }
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_40x_DEAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_40x_ESR] |= 0x00800000;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
    return false;
}

/*
 * Translate an effective address on the 6xx software-loaded TLB MMUs and,
 * when the access is guest visible, set up the exception state on failure.
 */
static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
                          MMUAccessType access_type,
                          hwaddr *raddrp, int *psizep, int *protp,
                          int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    hwaddr hash = 0; /* init to 0 to avoid a used-uninitialized warning */
    int type, ret;

    if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) {
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ctx.prot = 0;
    ret = mmu6xx_get_physical_address(env, &ctx, eaddr, &hash,
                                      access_type, type);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    } else if (!guest_visible) {
        return false;
    }

    log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
    if (type == ACCESS_CODE) {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            cs->exception_index = POWERPC_EXCP_IFTLB;
            env->error_code = 1 << 18;
            env->spr[SPR_IMISS] = eaddr;
            env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
            goto tlb_miss;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
            break;
        case -3:
            /* No execute protection violation */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
            break;
        case -4:
            /* Direct store exception */
            /* No code fetch is allowed in direct-store areas */
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
            break;
        }
    } else {
        switch (ret) {
        case -1:
            /* No matches in page tables or TLB */
            if (access_type == MMU_DATA_STORE) {
                cs->exception_index = POWERPC_EXCP_DSTLB;
                env->error_code = 1 << 16;
            } else {
                cs->exception_index = POWERPC_EXCP_DLTLB;
                env->error_code = 0;
            }
            env->spr[SPR_DMISS] = eaddr;
            env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
        tlb_miss:
            env->error_code |= ctx.key << 19;
            env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                                  get_pteg_offset32(cpu, hash);
            env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                                  get_pteg_offset32(cpu, ~hash);
            break;
        case -2:
            /* Access rights violation */
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x0A000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
            break;
        case -4:
            /* Direct store exception */
            switch (type) {
            case ACCESS_FLOAT:
                /* Floating point load/store */
                cs->exception_index = POWERPC_EXCP_ALIGN;
                env->error_code = POWERPC_EXCP_ALIGN_FP;
                env->spr[SPR_DAR] = eaddr;
                break;
            case ACCESS_RES:
                /* lwarx, ldarx or stwcx. */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x06000000;
                } else {
                    env->spr[SPR_DSISR] = 0x04000000;
                }
                break;
            case ACCESS_EXT:
                /* eciwx or ecowx */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x06100000;
                } else {
                    env->spr[SPR_DSISR] = 0x04100000;
                }
                break;
            default:
                printf("DSI: invalid exception (%d)\n", ret);
                cs->exception_index = POWERPC_EXCP_PROGRAM;
                env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                env->spr[SPR_DAR] = eaddr;
                break;
            }
            break;
        }
    }
    return false;
}

/*****************************************************************************/

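/*
 * Top-level translation dispatch: route the access to the translation
 * routine that matches the CPU's MMU model.
 */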
bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        return ppc_booke_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_SOFT_4xx:
        return ppc_40x_xlate(cpu, eaddr, access_type, raddrp,
                             psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_SOFT_6xx:
        return ppc_6xx_xlate(cpu, eaddr, access_type, raddrp,
                             psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_REAL:
        return ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep,
                                   protp);
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env_cpu(&cpu->env), "MPC8xx MMU model is not implemented\n");
    default:
        cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n");
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}