/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "system/memory.h"

/* Field layout of the MPU entry/probe words -- see the ISA MPU option */
#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT 12
#define XTENSA_MPU_ATTR_MASK 0x001fff00

/* Result flags returned by the MPU probe (PPTLB) helper */
#define XTENSA_MPU_PROBE_B 0x40000000
#define XTENSA_MPU_PROBE_V 0x80000000

/* Decoded system memory type (see mpu_attr_to_type()) */
#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003

/* Decoded memory-type detail flags (see mpu_attr_to_type()) */
#define XTENSA_MPU_TYPE_SYS_C 0x0010
#define XTENSA_MPU_TYPE_SYS_W 0x0020
#define XTENSA_MPU_TYPE_SYS_R 0x0040
#define XTENSA_MPU_TYPE_CPU_C 0x0100
#define XTENSA_MPU_TYPE_CPU_W 0x0200
#define XTENSA_MPU_TYPE_CPU_R 0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
#define XTENSA_MPU_TYPE_B 0x1000
#define XTENSA_MPU_TYPE_INT 0x2000

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Probe the memory; we don't care about the result but
     * only the side-effects (ie any MMU or other exception)
     */
    probe_access(env, vaddr, 1, MMU_INST_FETCH,
                 cpu_mmu_index(env_cpu(env), true), GETPC());
}

/* Write RASID; bit 0 (ring 0 ASID) is forced to 1. */
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        /* ASID-to-ring mapping changed: cached translations are stale */
        tlb_flush(env_cpu(env));
    }
}

/*
 * Page size code configured in ITLBCFG/DTLBCFG for variable-page
 * TLB ways 4..6; other ways have a fixed page size and return 0.
 */
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            /* page size code scales the page by a factor of 4 */
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            /* ways 0..3: fixed 4KB pages */
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        /* ITLB has at most 8 ways */
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                /* entry index position depends on the configured page size */
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static bool split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        if (*wi < (dtlb ? env->config->dtlb.nways : env->config->itlb.nways)) {
            split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
            return true;
        } else {
            /* nonexistent way */
            return false;
        }
    } else {
        /* region protection/translation: 8 fixed 512MB regions, one way */
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
        return true;
    }
}

/* Pointer to entry (wi, ei); indices must be valid for the configuration. */
static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    const xtensa_tlb *tlb = dtlb ? &env->config->dtlb : &env->config->itlb;

    assert(wi < tlb->nways && ei < tlb->way_size[wi]);
    return dtlb ?
        env->dtlb[wi] + ei :
        env->itlb[wi] + ei;
}

/*
 * Resolve a TLB entry specifier v to the entry it names.
 * Returns NULL for a nonexistent way; stores the way index via pwi
 * when pwi is non-NULL.
 */
static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        if (pwi) {
            *pwi = wi;
        }
        return xtensa_tlb_get_entry(env, dtlb, wi, ei);
    } else {
        return NULL;
    }
}

/* Fill an MMU TLB entry from a PTE; does not touch QEMU's cached TLB. */
static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    /* the PTE ring field selects which RASID byte supplies the ASID */
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}
/*
 * Install (vpn, pte) into entry (wi, ei), flushing QEMU's cached
 * translations for both the old and the new virtual page.
 */
static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                /* drop the translation of the page being replaced */
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

/*
 * Debug (gdb/monitor) physical address lookup: try translating the
 * address as a data read (is_write == 0) first, then as an instruction
 * fetch (is_write == 2); ~0 if neither succeeds.
 */
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

/* Invalidate (asid = 0) and unlock every entry of every way. */
static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

/* Install the architecturally fixed mappings of TLB ways 5 and 6. */
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* fixed ways: immutable identity-ish mappings of the top 1GB */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        /* variable way 6: identity map of the eight 512MB regions */
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

/* Region-protection reset: identity map of eight 512MB regions in way 0. */
static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

/* Put the configured translation hardware into its reset state. */
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        /* background map must start at 0 and be sorted by start address */
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

/* Map an ASID to its ring via RASID; 0xff if the ASID is not mapped. */
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    /* probe every way; more than one match is a multi-hit exception */
    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/* RITLB0/RDTLB0: read VPN and ASID of the named TLB entry. */
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry) {
            return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
        } else {
            return 0;
        }
    } else {
        return v & REGION_PAGE_MASK;
    }
}

/* RITLB1/RDTLB1: read PPN and attributes of the named TLB entry. */
uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    if (entry) {
        return entry->paddr | entry->attr;
    } else {
        return 0;
    }
}

/* IITLB/IDTLB: invalidate the named TLB entry (variable entries only). */
void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry && entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}
/*
 * PITLB/PDTLB: probe the TLB for virtual address v.
 * Returns the matching way/entry descriptor, or 0 when there is no match
 * (or the matching ring is below the current ring); a multi-hit raises
 * an exception.
 */
uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

/* WITLB/WDTLB: write PTE p into the TLB entry named by v. */
void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
    }
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        /* bits 2..3 select the cache mode */
        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/* A (mask, value) pattern for matching MPU memory-type encodings */
struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

/* 1 if attr matches any of the n patterns, 0 otherwise. */
static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}

/*
 * Decode the CPU-cache portion of an MPU memory type into
 * XTENSA_MPU_TYPE_CPU_* flags.
 */
static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };

    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x10) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x20) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x40) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}

/*
 * Decode an MPU entry's memory-type field into XTENSA_MPU_TYPE_* and
 * XTENSA_MPU_SYSTEM_TYPE_* flags (device/non-cacheable/cacheable,
 * bufferable, interruptible, system and CPU rwc bits).
 */
static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x80) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}
/*
 * Convert an MPU entry's access-rights field to PAGE_{READ,WRITE,EXEC}
 * for the given ring and add cachability bits derived from the memory
 * type (WB/WT when CPU-cacheable, bypass otherwise).
 */
static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    /* indexed by [ring != 0][access rights field] */
    static const unsigned access[2][16] = {
        [0] = {
            [4] = PAGE_READ,
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [10] = PAGE_READ,
            [11] = PAGE_READ | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}

/* is_write: 0 == read, 1 == write, 2 == instruction fetch */
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

/*
 * MMU translation of vaddr. On a TLB miss, when may_lookup_pt is set,
 * walk the page table via get_pte(); with update_tlb the fetched PTE is
 * installed into one of autorefill ways 0..3, otherwise a temporary
 * entry is used so the hardware TLB state is untouched.
 * \return 0 on success, exception cause code otherwise
 */
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            /* round-robin replacement among the four autorefill ways */
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    /* a DTLB entry never grants exec; an ITLB entry never grants r/w */
    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

/*
 * Walk the page table for vaddr: translate the PTE address through the
 * TLB (without autorefill) and load the PTE from physical memory.
 * \return true if the PTE was successfully loaded into *pte
 */
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

/* Region protection/translation: region index is the top 3 address bits. */
static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*
 * Find the MPU segment covering vaddr among the n entries (assumed
 * sorted by start address); store its index via *segment and return
 * the number of hits.
 */
static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            if (nhits++) {
                break;
            }
            *segment = i;
        }
    }
    return nhits;
}

/* Write MPUENB, keeping only the bits of configured segments. */
void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}

/* WPTLB: write foreground segment (vaddr + enable from v, attrs from p). */
void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        /* bit 0 of v is the segment enable */
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}

/* RPTLB0: read foreground segment start address and enable bit. */
uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}

/* RPTLB1: read foreground segment attributes. */
uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}
/*
 * PPTLB: probe the MPU for address v. Returns the matching foreground
 * segment (attr | index | V), or the background segment attr with the
 * B flag; a foreground multi-hit raises an exception.
 */
uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
    unsigned nhits;
    unsigned segment;
    unsigned bg_segment;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              v, &segment);
    if (nhits > 1) {
        /*
         * NOTE(review): this path produces no return value; it appears
         * to rely on exception_cause_vaddr not returning -- confirm.
         */
        HELPER(exception_cause_vaddr)(env, env->pc,
                                      LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
    } else {
        /* fall back to the background map */
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          v, &bg_segment);
        return env->config->mpu_bg[bg_segment].attr | XTENSA_MPU_PROBE_B;
    }
}

/*
 * MPU translation of vaddr (identity mapping; only access checks).
 * \return 0 on success, exception cause code otherwise
 */
static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        /* no enabled foreground segment: use the background map */
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}

/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        /* no translation: CACHEATTR nibble for the region gives the attrs */
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}
/* Print all valid entries of one TLB (monitor "info tlb" support). */
static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        /* page size of this way, for the header line */
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr Paddr ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

/*
 * Print one MPU map of n entries. env is NULL for the (config-owned)
 * background map, which has no enable bits to show.
 */
static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s Vaddr Attr Ring0 Ring1 System Type CPU cache\n"
                "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
                env ? "En" : " ",
                env ? "--" : " ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c 0x%08x 0x%08x %c%c%c %c%c%c ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;
        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}

/* Monitor "info tlb": dump whichever translation option is configured. */
void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}