/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "system/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "mmu-booke.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"

/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max = 2 * env->nb_tlb;

    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: the PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}
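
/*
 * Informational note (a summary, not authoritative): the 6xx software TLB
 * helpers above index env->tlb.tlb6[] through ppc6xx_tlb_getnum() (see
 * internal.h), which derives the entry from the EPN's set bits, the way and
 * whether the access is a code fetch; ppc6xx_tlb_invalidate_all() walks
 * 2 * nb_tlb entries because instruction and data entries share the same
 * array.  The entries themselves are filled by the tlbli/tlbld helpers
 * (helper_6xx_tlbi()/helper_6xx_tlbd() further down) from the IMISS/DMISS,
 * ICMP/DCMP and RPA SPRs that the guest's software TLB miss handler sets up.
 */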

/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}

static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env_cpu(env));
}

/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif
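
/*
 * Informational worked example for the range flushed above (assuming the
 * usual 7xx/74xx BATU layout with BL in bits 2..12): helper_store_ibatu()
 * and helper_store_dbatu() below compute mask as (BATU << 15) & 0x0FFE0000,
 * i.e. BL expressed in 128KiB units.  For a 256MiB BAT, BL = 0x7FF, so
 * mask = 0x0FFE0000 and end - base = mask + 0x00020000 = 256MiB, which is
 * 65536 4KiB pages; that is why do_invalidate_BAT() switches to a full TLB
 * flush as soon as more than 1024 pages would have to be flushed.
 */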

static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}

void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}

void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}

/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}

void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
        /*
         * XXX: given that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        ppc6xx_tlb_invalidate_virt(env, addr, 1);
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account; we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        g_assert_not_reached();
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}
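
/*
 * Background note on helper_store_sr() below: each of the 16 segment
 * registers covers one 256MiB segment of the 4GiB effective address space.
 * Invalidating a segment page by page would take 256MiB / 4KiB = 65536
 * tlb_flush_page() calls, so the helper only sets TLB_NEED_LOCAL_FLUSH and
 * defers a full flush.  On 64-bit hash MMUs the same SPR write is instead
 * folded into an SLB entry (ESID from the register number, VSID and flags
 * from the stored value), as implemented below.
 */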

void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes way
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA         0
#define TLBIE_IS_PID        1
#define TLBIE_IS_LPID       2
#define TLBIE_IS_ALL        3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB       0
#define TLBIE_RIC_PWC       1
#define TLBIE_RIC_ALL       2
#define TLBIE_RIC_GRP       3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K       0
#define TLBIE_R_AP_64K      5
#define TLBIE_R_AP_2M       1
#define TLBIE_R_AP_1G       2

/* RB field masks */
#define TLBIE_RB_EPN_MASK   PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK    PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK    PPC_BITMASK(56, 58)

void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap;        /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
                  __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                  is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
                      __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fall back
     * to a full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: attempt to invalidate a translation outside of quadrant 0\n",
                      __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
    }
    return;

inval_all:
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
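
/*
 * Informational example for the encoding above:
 * page_size = 1KiB << (2 * size), i.e. 4^size KiB, so size 0 -> 1KiB,
 * 1 -> 4KiB, 2 -> 16KiB, 3 -> 64KiB, ..., 7 -> 16MiB, 9 -> 256MiB.
 * booke_page_size_to_tlb() below is the inverse mapping and returns -1
 * for page sizes that are not a power of four times 1KiB.
 */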

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
{
    unsigned mmu_idx = 0;

    if (tlb->prot & 0xf) {
        mmu_idx |= 0x1;
    }
    if ((tlb->prot >> 4) & 0xf) {
        mmu_idx |= 0x2;
    }
    if (tlb->attr & 1) {
        mmu_idx <<= 2;
    }

    tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
                              TARGET_LONG_BITS);
}

void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
{
    if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
        return true;
    }
    if (!env->nb_pids) {
        return false;
    }

    if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
        return true;
    }

    return false;
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];

    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(env_cpu(env), tlb);
    }

    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        tlb->EPN = value & 0xFFFFFC00;
        tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            tlb->prot &= ~PAGE_VALID;
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        break;
    case 1:
        tlb->RPN = value & 0xFFFFFC0F;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
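
/*
 * Quick reference for the MAS SPRs used by the BookE 2.06 (e500) helpers in
 * the next section (paraphrased, not a substitute for the e500 documents):
 * MAS0 selects the TLB array (TLBSEL) and entry/way (ESEL) and carries the
 * next-victim hint (NV); MAS1 holds VALID, IPROT, TID, TS and TSIZE; MAS2
 * holds the EPN and the WIMGE/ACM/VLE attributes; MAS3 and MAS7 together
 * hold the RPN and permission bits; MAS6 (SPID, SAS, SIND, ISIZE) provides
 * the search keys used by tlbsx and tlbilx.
 */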

/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
1", 1086 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": 1087 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf) 1088 * 1089 * "Note that when an L2 TLB entry is written, it may be displacing an 1090 * already valid entry in the same L2 TLB location (a victim). If a 1091 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 1092 * TLB entry is automatically invalidated." 1093 */ 1094 flush_page(env, tlb); 1095 } 1096 1097 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | 1098 env->spr[SPR_BOOKE_MAS3]; 1099 tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; 1100 1101 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 1102 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */ 1103 booke206_fixed_size_tlbn(env, tlbn, tlb); 1104 } else { 1105 if (!(tlbncfg & TLBnCFG_AVAIL)) { 1106 /* force !AVAIL TLB entries to correct page size */ 1107 tlb->mas1 &= ~MAS1_TSIZE_MASK; 1108 /* XXX can be configured in MMUCSR0 */ 1109 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; 1110 } 1111 } 1112 1113 /* Make a mask from TLB size to discard invalid bits in EPN field */ 1114 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 1115 /* Add a mask for page attributes */ 1116 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; 1117 1118 if (!FIELD_EX64(env->msr, MSR, CM)) { 1119 /* 1120 * Executing a tlbwe instruction in 32-bit mode will set bits 1121 * 0:31 of the TLB EPN field to zero. 1122 */ 1123 mask &= 0xffffffff; 1124 } 1125 1126 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; 1127 1128 if (!(tlbncfg & TLBnCFG_IPROT)) { 1129 /* no IPROT supported by TLB */ 1130 tlb->mas1 &= ~MAS1_IPROT; 1131 } 1132 1133 flush_page(env, tlb); 1134 } 1135 1136 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) 1137 { 1138 int tlbn = booke206_tlbm_to_tlbn(env, tlb); 1139 int way = booke206_tlbm_to_way(env, tlb); 1140 1141 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; 1142 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; 1143 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1144 1145 env->spr[SPR_BOOKE_MAS1] = tlb->mas1; 1146 env->spr[SPR_BOOKE_MAS2] = tlb->mas2; 1147 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; 1148 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; 1149 } 1150 1151 void helper_booke206_tlbre(CPUPPCState *env) 1152 { 1153 ppcmas_tlb_t *tlb = NULL; 1154 1155 tlb = booke206_cur_tlb(env); 1156 if (!tlb) { 1157 env->spr[SPR_BOOKE_MAS1] = 0; 1158 } else { 1159 booke206_tlb_to_mas(env, tlb); 1160 } 1161 } 1162 1163 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) 1164 { 1165 ppcmas_tlb_t *tlb = NULL; 1166 int i, j; 1167 hwaddr raddr; 1168 uint32_t spid, sas; 1169 1170 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; 1171 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; 1172 1173 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1174 int ways = booke206_tlb_ways(env, i); 1175 1176 for (j = 0; j < ways; j++) { 1177 tlb = booke206_get_tlbm(env, i, address, j); 1178 1179 if (!tlb) { 1180 continue; 1181 } 1182 1183 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { 1184 continue; 1185 } 1186 1187 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1188 continue; 1189 } 1190 1191 booke206_tlb_to_mas(env, tlb); 1192 return; 1193 } 1194 } 1195 1196 /* no entry found, fill with defaults */ 1197 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1198 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1199 env->spr[SPR_BOOKE_MAS2] = 
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode exception */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}