/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"

#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT 12
#define XTENSA_MPU_ATTR_MASK 0x001fff00

#define XTENSA_MPU_PROBE_B 0x40000000
#define XTENSA_MPU_PROBE_V 0x80000000

#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003

#define XTENSA_MPU_TYPE_SYS_C 0x0010
#define XTENSA_MPU_TYPE_SYS_W 0x0020
#define XTENSA_MPU_TYPE_SYS_R 0x0040
#define XTENSA_MPU_TYPE_CPU_C 0x0100
#define XTENSA_MPU_TYPE_CPU_W 0x0200
#define XTENSA_MPU_TYPE_CPU_R 0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
#define XTENSA_MPU_TYPE_B 0x1000
#define XTENSA_MPU_TYPE_INT 0x2000

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Probe the memory; we don't care about the result but
     * only the side-effects (i.e. any MMU or other exception)
     */
    probe_access(env, vaddr, 1, MMU_INST_FETCH,
                 cpu_mmu_index(env_cpu(env), true), GETPC());
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(env_cpu(env));
    }
}
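
/*
 * A note on the RASID layout used by wsr_rasid above and get_ring below:
 * the register packs one 8-bit ASID per ring, ring 0 in bits 7..0 up to
 * ring 3 in bits 31..24.  The helper forces the low byte to 1, i.e. ring 0
 * always uses ASID 1.  As a rough example, with the reset value 0x04030201
 * a TLB entry tagged with ASID 3 belongs to ring 2.
 */
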
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}
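
/*
 * A rough worked example of the way-4 split above, assuming the way-4
 * page size field in DTLBCFG is 0 (1 MB pages): for v = 0x12345678,
 * eibase = 20, so *ei = (0x12345678 >> 20) & 0x3 = 3, and the address
 * mask is 0xfff00000, giving *vpn = 0x12300000.
 */
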
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static bool split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        if (*wi < (dtlb ? env->config->dtlb.nways : env->config->itlb.nways)) {
            split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
            return true;
        } else {
            return false;
        }
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
        return true;
    }
}

static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    const xtensa_tlb *tlb = dtlb ? &env->config->dtlb : &env->config->itlb;

    assert(wi < tlb->nways && ei < tlb->way_size[wi]);
    return dtlb ?
        env->dtlb[wi] + ei :
        env->itlb[wi] + ei;
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        if (pwi) {
            *pwi = wi;
        }
        return xtensa_tlb_get_entry(env, dtlb, wi, ei);
    } else {
        return NULL;
    }
}

static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}
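
/*
 * When ways 5 and 6 are not configurable (varway56 clear), they come out
 * of reset as the fixed mappings below: way 5 maps the 0xd0000000 and
 * 0xd8000000 regions to physical address 0, and way 6 maps
 * 0xe0000000/0xf0000000 to physical 0xf0000000.  Under mmu_attr_to_access
 * further down, attr 7 translates to write-back cacheable and attr 3 to
 * cache bypass.  With varway56 set, way 6 is instead reset to an identity
 * map of the eight 512 MB regions.
 */
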
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}
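
/*
 * get_ring returns 0xff when the ASID is not present in RASID, so the
 * "ring < 4" test in the lookup below doubles as a validity check: an
 * entry whose ASID matches no ring is treated as a miss for the current
 * address space.
 */
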
/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry) {
            return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
        } else {
            return 0;
        }
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    if (entry) {
        return entry->paddr | entry->attr;
    } else {
        return 0;
    }
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry && entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
    }
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
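
/*
 * The region-protection table above and the CACHEATTR table below are
 * nearly identical; the notable difference is that attribute 5 is only
 * defined for region protection (see the [5] entry above), so it falls
 * back to 0 (no access) in the CACHEATTR table.
 */
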
/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}

static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };

    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x10) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x20) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x40) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}

static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x80) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}
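
/*
 * attr_pattern_match() treats each { mask, value } pair as "the bits
 * selected by mask must equal value".  As a rough example: a memory type
 * field of 0x001 matches { .mask = 0x1f6, .value = 0x000 } above, so the
 * region is classified as device, and it also matches
 * { .mask = 0x1f7, .value = 0x001 }, additionally marking it bufferable
 * (XTENSA_MPU_TYPE_B).
 */
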
static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    static const unsigned access[2][16] = {
        [0] = {
            [4] = PAGE_READ,
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [10] = PAGE_READ,
            [11] = PAGE_READ | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
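
/*
 * TLB autorefill flow, as implemented below: on a refill-way miss,
 * get_pte() translates the PTE address derived from PTEVADDR (without
 * touching the TLB) and loads the PTE from guest memory; the result is
 * then installed into one of the four autorefill ways, selected
 * round-robin via env->autorefill_idx.
 */
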
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            if (nhits++) {
                break;
            }
            *segment = i;
        }
    }
    return nhits;
}

void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}

void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}

uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}
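
/*
 * rptlb0/rptlb1 implement the two halves of an MPU entry read: rptlb0
 * above returns the segment start address with the enable bit folded
 * into bit 0 (segments are mpu_align-aligned, so the low bits are free),
 * while rptlb1 below returns the access rights and memory type attribute
 * word.
 */
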
uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}

uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
    unsigned nhits;
    unsigned segment;
    unsigned bg_segment;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              v, &segment);
    if (nhits > 1) {
        HELPER(exception_cause_vaddr)(env, env->pc,
                                      LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          v, &bg_segment);
        return env->config->mpu_bg[bg_segment].attr | XTENSA_MPU_PROBE_B;
    }
}

static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a page-table walk and change the xtensa autorefill
 * TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                   XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                   XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}
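
/*
 * The dump_tlb/dump_mpu printers below are debug aids (dump_mmu, which
 * drives them, backs QEMU's 'info tlb' monitor command); they use
 * qemu_printf and have no side effects on the translation state.
 */
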
static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}
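
/*
 * dump_mpu prints either the foreground map (env != NULL, with the
 * MPUENB enable state in the leading column) or the static background
 * map (env == NULL, enable column left blank); dump_mmu below calls it
 * both ways.
 */
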
static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s Vaddr      Attr       Ring0 Ring1 System Type   CPU cache\n"
                "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
                env ? "En" : "  ",
                env ? "--" : "  ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c 0x%08x 0x%08x %c%c%c   %c%c%c   ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;
        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB     %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c    %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}

void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}