/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
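
    /*
     * BTLB entries occupy the first HPPA_BTLB_ENTRIES(env) slots of
     * env->tlb[], and hppa_alloc_tlb_ent below only hands out slots at
     * or above that index, so a pointer comparison suffices here.
     */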
    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, MemOp mop, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical. */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress_align;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
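    /*
     * Illustration: privilege level 0 is the most privileged.  A page
     * with ar_pl1 = 1 and ar_pl2 = 0 is readable at PL 0-1, writable
     * only at PL 0, and executable at PL 0-1 (ar_pl2 <= priv <= ar_pl1).
     */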
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
    }
    if (unlikely(!ent->d)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
    }
    if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress_align:
    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
        ret = EXCP_UNALIGN;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}
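
/*
 * Example of the pa2.0 "b" field fixup below: with PSW_W set, bits 63:62
 * of the base register used to form the gva are copied into IOR bits
 * 63:62; with PSW_W clear, bits 31:30 of the base register are used.
 */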

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
                  env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                             MMUAccessType type, int mmu_idx,
                             MemOp memop, int size, bool probe, uintptr_t ra)
{
    CPUHPPAState *env = cpu_env(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                     &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception. */
        raise_exception_with_ior(env, excp, ra, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
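    /*
     * lg_page_size is therefore fixed at TARGET_PAGE_BITS: each page of
     * a multi-page hppa TLB entry is (re)filled individually through
     * this path as it is touched.
     */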
    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;

    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}
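
/*
 * Note that in hppa_ptlbe above, BTLB slots survive the purge: they are
 * re-inserted into the rebuilt interval tree, while all other slots are
 * zeroed and chained back onto the unused list.
 */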

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
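
/*
 * Register convention used by diag_btlb() below (summarised from the
 * switch cases, not from the PDC document): gr[25] selects the
 * sub-operation (0 = info, 1 = insert, 2 = purge, 3 = purge all); for
 * an insert, gr[24]:gr[23] form the 64-bit virtual page number, gr[22]
 * is the physical page, gr[21] the length in pages, gr[20] the access
 * rights word and gr[19] the slot; status is returned in gr[28].
 */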

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x "
                      "len %d into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}

void HELPER(update_gva_offset_mask)(CPUHPPAState *env)
{
    update_gva_offset_mask(env);
}