/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "system/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"
#include "tcg/oversized-guest.h"
#include "pmp.h"

int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    bool virt = env->virt_enabled;
    int mode = env->priv;

    /* All priv -> mmu_idx mappings are here */
    if (!ifetch) {
        uint64_t status = env->mstatus;

        if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
            virt = get_field(env->mstatus, MSTATUS_MPV) &&
                   (mode != PRV_M);
            if (virt) {
                status = env->vsstatus;
            }
        }
        if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
            mode = MMUIdx_S_SUM;
        }
    }

    return mode | (virt ? MMU_2STAGE_BIT : 0);
#endif
}
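
/*
 * An illustration of the encoding above (not an exhaustive list): an
 * S-mode data access with MSTATUS_SUM set maps to MMUIdx_S_SUM, and the
 * same situation inside a guest (V=1) additionally ORs in MMU_2STAGE_BIT,
 * so the TLB kept for that index reflects both the SUM-qualified S-mode
 * permissions and the two-stage translation.
 */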

bool cpu_get_fcfien(CPURISCVState *env)
{
    /* no cfi extension, return false */
    if (!env_archcpu(env)->cfg.ext_zicfilp) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        if (riscv_has_ext(env, RVS)) {
            return env->senvcfg & SENVCFG_LPE;
        }
        return env->menvcfg & MENVCFG_LPE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            return env->henvcfg & HENVCFG_LPE;
        }
        return env->menvcfg & MENVCFG_LPE;
    case PRV_M:
        return env->mseccfg & MSECCFG_MLPE;
#endif
    default:
        g_assert_not_reached();
    }
}

bool cpu_get_bcfien(CPURISCVState *env)
{
    /* no cfi extension, return false */
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        /*
         * If S is not implemented then shadow stack for U can't be turned
         * on. This is checked in `riscv_cpu_validate_set_extensions`, so
         * there is no need to check or assert here.
         */
        return env->senvcfg & SENVCFG_SSE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            return env->henvcfg & HENVCFG_SSE;
        }
        return env->menvcfg & MENVCFG_SSE;
    case PRV_M: /* M-mode shadow stack is always off */
        return false;
#endif
    default:
        g_assert_not_reached();
    }
}

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags)
{
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (cpu->cfg.ext_zve32x) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */

        /* lmul encoded as in DisasContext::lmul */
        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
        uint32_t maxsz = vlmax << vsew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

    if (cpu_get_fcfien(env)) {
        /*
         * For Forward CFI, only the expectation of a lpad at
         * the start of the block is tracked via env->elp. env->elp
         * is turned on during jalr translation.
         */
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
    }

    if (cpu_get_bcfien(env)) {
        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= riscv_env_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    if (env->cur_pmmask != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
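
/*
 * A worked instance of the vl_eq_vlmax computation above (illustrative
 * numbers): with vlenb = 16 (VLEN = 128), SEW = 32 (vsew = 2) and
 * LMUL = 1 (lmul = 0), VLMAX = LMUL * VLEN / SEW = 4 and
 * maxsz = vlmax << vsew = 16 bytes, so GVEC is usable whenever
 * vstart == 0 and vl == 4. With the fractional LMUL = 1/8 and SEW = 8,
 * VLMAX = 2 and maxsz = 2 bytes, below the 8-byte GVEC minimum, so
 * vl_eq_vlmax stays false regardless of vl.
 */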

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = 0, base = 0;
    RISCVMXL xl = env->xl;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    int mode = cpu_address_mode(env);
    xl = cpu_get_xl(env, mode);
    if (riscv_has_ext(env, RVJ)) {
        switch (mode) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}
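
/*
 * For example, using the tables above: index 1 yields IRQ 1 (Supervisor
 * software interrupt) with rdzero = 0, while index 0 yields the reserved
 * IRQ 0 with rdzero = 1, i.e. the corresponding hviprio field reads as
 * zero.
 */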

/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 *  Default  |
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b),  3 (03),  7 (07)
 *           |  9 (09),  1 (01),  5 (05)
 *           | 12 (0c)
 *           | 10 (0a),  2 (02),  6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}

static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
                                  riscv_cpu_cfg(env)->ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}
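
/*
 * An illustration of the resolution above (hypothetical values): with
 * pending = MIP_MSIP | MIP_MEIP and no AIA (Smaia/Ssaia), the function
 * simply returns ctz64(pending) = 3, the machine software interrupt.
 * With AIA enabled and all iprio[] entries zero, the default priority
 * table applies instead and IRQ 11 (machine external) wins, because its
 * default priority IPRIO_DEFAULT_M is higher (numerically lower) than
 * IRQ 3's IPRIO_DEFAULT_M + 1.
 */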

/*
 * Doesn't report interrupts inserted using mvip from M-mode firmware or
 * using hvip bits 13:63 from HS-mode. Those are returned in
 * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
 */
uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs | irqs_f, env->siprio);
}

int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
    uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
    uint64_t vsbits;

    /* Bring VS-level bits to correct position */
    vsbits = irqs & VS_MODE_INTERRUPTS;
    irqs &= ~VS_MODE_INTERRUPTS;
    irqs |= vsbits >> 1;

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    (irqs | irqs_f_vs), env->hviprio);
}
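
/*
 * The ">> 1" repositioning above matches how VS-level interrupts appear
 * to the guest: e.g. a pending MIP_VSSIP (bit 2) is reported to VS mode
 * in bit 1, the supervisor software interrupt position, since the guest
 * accesses its VS-level sources through the usual S-level bit layout.
 */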

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
    uint64_t vsbits, irq_delegated;
    int virq;

    /*
     * Determine interrupt enable state of all privilege modes.
     * The mie/hsie/vsie values are 0 or 1, so negating them below
     * ("& -mie" etc.) yields an all-zeros or all-ones mask.
     */
    if (env->virt_enabled) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check for virtual S-mode interrupts. */
    irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;

    /* Check HS-mode interrupts */
    irqs = ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check for virtual VS-mode interrupts. */
    irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;

    /* Check VS-mode interrupts */
    irq_delegated = pending & env->mideleg & env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = irq_delegated & VS_MODE_INTERRUPTS;
    irq_delegated &= ~VS_MODE_INTERRUPTS;
    irq_delegated |= vsbits >> 1;

    irqs = (irq_delegated | irqs_f_vs) & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->hviprio);
        if (virq <= 0 || (virq > 12 && virq <= 63)) {
            return virq;
        } else {
            return virq + 1;
        }
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = env->virt_enabled;

    /*
     * If the zicfilp extension is available and henvcfg.LPE = 1,
     * then apply the SPELP mask on mstatus
     */
    if (env_archcpu(env)->cfg.ext_zicfilp &&
        get_field(env->henvcfg, HENVCFG_LPE)) {
        mstatus_mask |= SSTATUS_SPELP;
    }

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

void riscv_cpu_interrupt(CPURISCVState *env)
{
    uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
    CPUState *cs = env_cpu(env);

    BQL_LOCK_GUARD();

    if (env->virt_enabled) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        irqf = env->hvien & env->hvip & env->vsie;
    } else {
        irqf = env->mvien & env->mvip & env->sie;
    }

    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    if (env->mip | vsgein | vstip | irqf) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
{
    uint64_t old = env->mip;

    /* No need to update mip for VSTIP */
    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;

    BQL_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    riscv_cpu_interrupt(env);

    return old;
}
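
/*
 * Typical caller pattern (a sketch; the actual wiring lives in cpu.c and
 * the interrupt controller models): a timer or interrupt controller
 * raising and lowering the machine timer pending bit would do
 *
 *     riscv_cpu_update_mip(env, MIP_MTIP, level ? MIP_MTIP : 0);
 *
 * i.e. 'mask' selects the bit(s) to change and 'value' supplies their new
 * state, after which riscv_cpu_interrupt() re-evaluates CPU_INTERRUPT_HARD.
 */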

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
    g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);

    if (newpriv != env->priv || env->virt_enabled != virt_en) {
        if (icount_enabled()) {
            riscv_itrigger_update_priv(env);
        }

        riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
    }

    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;

    if (riscv_has_ext(env, RVH)) {
        /* Flush the TLB on all virt mode changes. */
        if (env->virt_enabled != virt_en) {
            tlb_flush(env_cpu(env));
        }

        env->virt_enabled = virt_en;
        if (virt_en) {
            /*
             * The guest external interrupts from an interrupt controller are
             * delivered only when the Guest/VM is running (i.e. V=1). This
             * means any guest external interrupt which is triggered while the
             * Guest/VM is not running (i.e. V=0) will be missed by QEMU,
             * resulting in a guest with sluggish response to serial console
             * input and other I/O events.
             *
             * To solve this, we check and inject interrupt after setting V=1.
             */
            riscv_cpu_update_mip(env, 0, 0);
        }
    }
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @addr: The physical address to be checked for permission
 * @size: The size of the access
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    bool pmp_has_privs;

    if (!riscv_cpu_cfg(env)->pmp) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                       &pmp_priv, mode);
    if (!pmp_has_privs) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);

    return TRANSLATE_SUCCESS;
}
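
/*
 * For instance (illustrative): if the matching PMP region grants only
 * PMP_READ | PMP_EXEC, an MMU_DATA_STORE access fails the
 * pmp_hart_has_privs() check above and the caller sees TRANSLATE_PMP_FAIL,
 * which raise_mmu_exception() later reports as an access fault rather
 * than a page fault.
 */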

/* Returns 'true' if a svukte address check is needed */
static bool do_svukte_check(CPURISCVState *env, bool first_stage,
                            int mode, bool virt)
{
    /* Svukte extension depends on Sv39. */
    if (!(env_archcpu(env)->cfg.ext_svukte) ||
        !first_stage ||
        VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
        return false;
    }

    /*
     * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
     * executing HLV/HLVX/HSV in U-mode.
     * For other cases, check senvcfg.UKTE.
     */
    if (env->priv == PRV_U && !env->virt_enabled && virt) {
        if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
            return false;
        }
    } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
        return false;
    }

    /*
     * Svukte extension is qualified only in U or VU-mode.
     *
     * Effective mode can be switched to U or VU-mode by:
     * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
     * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
     * - U-mode.
     * - VU-mode.
     * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
     */
    if (mode != PRV_U) {
        return false;
    }

    return true;
}

static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
{
    /* svukte extension excludes RV32 */
    uint32_t sxlen = 32 * riscv_cpu_sxl(env);
    uint64_t high_bit = addr & (1UL << (sxlen - 1));
    return !high_bit;
}
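
/*
 * Example of the check above (assuming SXLEN = 64): with Svukte active,
 * a U-mode access to an address with bit 63 set fails check_svukte_addr()
 * and get_physical_address() below rejects it before the page table walk
 * even starts.
 */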

/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address or guest physical address to be translated
 * @fault_pte_addr: If not NULL, this will be set to fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *ret_prot, vaddr addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug, bool is_probe)
{
    /*
     * NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct
     */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmuidx_priv(mmu_idx);
    bool virt = mmuidx_2stage(mmu_idx);
    bool use_background = false;
    hwaddr ppn;
    int napot_bits = 0;
    target_ulong napot_mask;
    bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
    bool sstack_page = false;

    if (do_svukte_check(env, first_stage, mode, virt) &&
        !check_svukte_addr(env, addr)) {
        return TRANSLATE_FAIL;
    }

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!env->virt_enabled && two_stage) {
        use_background = true;
    }

    if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *ret_prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, widened;

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }

    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    int sxlen = 16 << riscv_cpu_sxl(env);
    int sxlen_bytes = sxlen / 8;

    if (first_stage == true) {
        target_ulong mask, masked_msbs;

        if (sxlen > (va_bits - 1)) {
            mask = (1L << (sxlen - (va_bits - 1))) - 1;
        } else {
            mask = 0;
        }
        masked_msbs = (addr >> (va_bits - 1)) & mask;

        if (masked_msbs != 0 && masked_msbs != mask) {
            return TRANSLATE_FAIL;
        }
    } else {
        if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
            return TRANSLATE_FAIL;
        }
    }

    bool pbmte = env->menvcfg & MENVCFG_PBMTE;
    bool svade = riscv_cpu_cfg(env)->ext_svade;
    bool svadu = riscv_cpu_cfg(env)->ext_svadu;
    bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;

    if (first_stage && two_stage && env->virt_enabled) {
        pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
        adue = adue && (env->henvcfg & HENVCFG_ADUE);
    }

    int ptshift = (levels - 1) * ptidxbits;
    target_ulong pte;
    hwaddr pte_addr;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 MMUIdx_U, false, true,
                                                 is_debug, false);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
                                               sxlen_bytes,
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else {
            if (pte & PTE_RESERVED) {
                return TRANSLATE_FAIL;
            }

            if (!pbmte && (pte & PTE_PBMT)) {
                return TRANSLATE_FAIL;
            }

            if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
                return TRANSLATE_FAIL;
            }

            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        }
        if (pte & (PTE_R | PTE_W | PTE_X)) {
            goto leaf;
        }

        /* Inner PTE, continue walking */
        if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
            return TRANSLATE_FAIL;
        }
        base = ppn << PGSHIFT;
    }

    /* No leaf pte at any translation level. */
    return TRANSLATE_FAIL;

 leaf:
    if (ppn & ((1ULL << ptshift) - 1)) {
        /* Misaligned PPN */
        return TRANSLATE_FAIL;
    }
    if (!pbmte && (pte & PTE_PBMT)) {
        /* Reserved without Svpbmt. */
        return TRANSLATE_FAIL;
    }

    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
    /* Check for reserved combinations of RWX flags. */
    switch (rwx) {
    case PTE_W | PTE_X:
        return TRANSLATE_FAIL;
    case PTE_W:
        /*
         * If bcfi is enabled, PTE_W alone is not reserved and marks a
         * shadow stack page.
         */
        if (cpu_get_bcfien(env) && first_stage) {
            sstack_page = true;
            /*
             * If the shadow stack index is used, reads and writes are
             * allowed; otherwise only reads are allowed, and nothing
             * for a probe.
             */
            rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
            break;
        }
        return TRANSLATE_FAIL;
    case PTE_R:
        /*
         * No matter the access_type, shadow stack accesses to read-only
         * memory are always store page faults. During unwind, loads will
         * be promoted as store faults.
         */
        if (is_sstack_idx) {
            return TRANSLATE_FAIL;
        }
        break;
    }

    int prot = 0;
    if (rwx & PTE_R) {
        prot |= PAGE_READ;
    }
    if (rwx & PTE_W) {
        prot |= PAGE_WRITE;
    }
    if (rwx & PTE_X) {
        bool mxr = false;

        /*
         * Use mstatus for first stage or for the second stage without
         * virt_enabled (MPRV+MPV)
         */
        if (first_stage || !env->virt_enabled) {
            mxr = get_field(env->mstatus, MSTATUS_MXR);
        }

        /* MPRV+MPV case, check VSSTATUS */
        if (first_stage && two_stage && !env->virt_enabled) {
            mxr |= get_field(env->vsstatus, MSTATUS_MXR);
        }

        /*
         * Setting MXR at HS-level overrides both VS-stage and G-stage
         * execute-only permissions
         */
        if (env->virt_enabled) {
            mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
        }

        if (mxr) {
            prot |= PAGE_READ;
        }
        prot |= PAGE_EXEC;
    }

    if (pte & PTE_U) {
        if (mode != PRV_U) {
            if (!mmuidx_sum(mmu_idx)) {
                return TRANSLATE_FAIL;
            }
            /* SUM allows only read+write, not execute. */
            prot &= PAGE_READ | PAGE_WRITE;
        }
    } else if (mode != PRV_S) {
        /* Supervisor PTE flags when not S mode */
        return TRANSLATE_FAIL;
    }

    if (!((prot >> access_type) & 1)) {
        /*
         * Access check failed; access check failures for shadow stack
         * pages are reported as access faults.
         */
        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
    }

    target_ulong updated_pte = pte;

    /*
     * If ADUE is enabled, set accessed and dirty bits.
     * Otherwise raise an exception if necessary.
     */
    if (adue) {
        updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
    } else if (!(pte & PTE_A) ||
               (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
        return TRANSLATE_FAIL;
    }

    /* Page table updates need to be atomic with MTTCG enabled */
    if (updated_pte != pte && !is_debug) {
        if (!adue) {
            return TRANSLATE_FAIL;
        }

        /*
         * - if accessed or dirty bits need updating, and the PTE is
         *   in RAM, then we do so atomically with a compare and swap.
         * - if the PTE is in IO space or ROM, then it can't be updated
         *   and we return TRANSLATE_FAIL.
         * - if the PTE changed by the time we went to update it, then
         *   it is no longer valid and we must re-walk the page table.
         */
        MemoryRegion *mr;
        hwaddr l = sxlen_bytes, addr1;
        mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                     false, MEMTXATTRS_UNSPECIFIED);
        if (memory_region_is_ram(mr)) {
            target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
            /*
             * MTTCG is not enabled on oversized TCG guests so
             * page table updates do not need to be atomic
             */
            *pte_pa = pte = updated_pte;
#else
            target_ulong old_pte;
            if (riscv_cpu_sxl(env) == MXL_RV32) {
                old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
            } else {
                old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
            }
            if (old_pte != pte) {
                goto restart;
            }
            pte = updated_pte;
#endif
        } else {
            /*
             * Misconfigured PTE in ROM (AD bits are not preset) or
             * PTE is in IO space and can't be updated atomically.
             */
            return TRANSLATE_FAIL;
        }
    }

    /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
    target_ulong vpn = addr >> PGSHIFT;

    if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
        napot_bits = ctzl(ppn) + 1;
        if ((i != (levels - 1)) || (napot_bits != 4)) {
            return TRANSLATE_FAIL;
        }
    }

    napot_mask = (1 << napot_bits) - 1;
    *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                  (vpn & (((target_ulong)1 << ptshift) - 1))
                 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

    /*
     * Remove write permission unless this is a store, or the page is
     * already dirty, so that we TLB miss on later writes to update
     * the dirty bit.
     */
    if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
        prot &= ~PAGE_WRITE;
    }
    *ret_prot = prot;

    return TRANSLATE_SUCCESS;
}
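
/*
 * A worked Sv39 instance of the walk above (illustrative numbers):
 * levels = 3, ptidxbits = 9, ptesize = 8 and va_bits = 39, so a
 * first-stage virtual address splits into VPN[2]:VPN[1]:VPN[0]:offset =
 * 9+9+9+12 bits. For addr = 0x40201000 every VPN field is 1, so each
 * level loads the PTE at base + 1 * 8; a leaf found one level early
 * (i == 1, ptshift == 9) is a 2 MiB megapage whose low 9 PPN bits must
 * be clear, or the misaligned-PPN check fails the translation.
 */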
1281 */ 1282 MemoryRegion *mr; 1283 hwaddr l = sxlen_bytes, addr1; 1284 mr = address_space_translate(cs->as, pte_addr, &addr1, &l, 1285 false, MEMTXATTRS_UNSPECIFIED); 1286 if (memory_region_is_ram(mr)) { 1287 target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1); 1288 #if TCG_OVERSIZED_GUEST 1289 /* 1290 * MTTCG is not enabled on oversized TCG guests so 1291 * page table updates do not need to be atomic 1292 */ 1293 *pte_pa = pte = updated_pte; 1294 #else 1295 target_ulong old_pte; 1296 if (riscv_cpu_sxl(env) == MXL_RV32) { 1297 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte); 1298 } else { 1299 old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); 1300 } 1301 if (old_pte != pte) { 1302 goto restart; 1303 } 1304 pte = updated_pte; 1305 #endif 1306 } else { 1307 /* 1308 * Misconfigured PTE in ROM (AD bits are not preset) or 1309 * PTE is in IO space and can't be updated atomically. 1310 */ 1311 return TRANSLATE_FAIL; 1312 } 1313 } 1314 1315 /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */ 1316 target_ulong vpn = addr >> PGSHIFT; 1317 1318 if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { 1319 napot_bits = ctzl(ppn) + 1; 1320 if ((i != (levels - 1)) || (napot_bits != 4)) { 1321 return TRANSLATE_FAIL; 1322 } 1323 } 1324 1325 napot_mask = (1 << napot_bits) - 1; 1326 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | 1327 (vpn & (((target_ulong)1 << ptshift) - 1)) 1328 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); 1329 1330 /* 1331 * Remove write permission unless this is a store, or the page is 1332 * already dirty, so that we TLB miss on later writes to update 1333 * the dirty bit. 1334 */ 1335 if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) { 1336 prot &= ~PAGE_WRITE; 1337 } 1338 *ret_prot = prot; 1339 1340 return TRANSLATE_SUCCESS; 1341 } 1342 1343 static void raise_mmu_exception(CPURISCVState *env, target_ulong address, 1344 MMUAccessType access_type, bool pmp_violation, 1345 bool first_stage, bool two_stage, 1346 bool two_stage_indirect) 1347 { 1348 CPUState *cs = env_cpu(env); 1349 1350 switch (access_type) { 1351 case MMU_INST_FETCH: 1352 if (pmp_violation) { 1353 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT; 1354 } else if (env->virt_enabled && !first_stage) { 1355 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; 1356 } else { 1357 cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT; 1358 } 1359 break; 1360 case MMU_DATA_LOAD: 1361 if (pmp_violation) { 1362 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; 1363 } else if (two_stage && !first_stage) { 1364 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; 1365 } else { 1366 cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT; 1367 } 1368 break; 1369 case MMU_DATA_STORE: 1370 if (pmp_violation) { 1371 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 1372 } else if (two_stage && !first_stage) { 1373 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; 1374 } else { 1375 cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT; 1376 } 1377 break; 1378 default: 1379 g_assert_not_reached(); 1380 } 1381 env->badaddr = address; 1382 env->two_stage_lookup = two_stage; 1383 env->two_stage_indirect_lookup = two_stage_indirect; 1384 } 1385 1386 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1387 { 1388 RISCVCPU *cpu = RISCV_CPU(cs); 1389 CPURISCVState *env = &cpu->env; 1390 hwaddr phys_addr; 1391 int prot; 1392 int mmu_idx = riscv_env_mmu_index(&cpu->env, false); 1393 1394 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, 

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        /* shadow stack misaligned accesses are access faults */
        if (mmu_idx & MMU_IDX_SS_WRITE) {
            cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        /* shadow stack misaligned accesses are access faults */
        if (mmu_idx & MMU_IDX_SS_WRITE) {
            cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = mmuidx_2stage(mmu_idx);
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmuidx_priv(mmu_idx);
    /* default TLB page size */
    hwaddr tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (two_stage_lookup) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false, probe);

        /*
         * A G-stage exception may be triggered during the two-stage lookup,
         * in which case env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, MMUIdx_U, false, true,
                                       false, probe);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx
                          " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                               size, access_type, mode);
                tlb_size = pmp_get_tlb_size(env, pa);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size %" HWADDR_PRIu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            } else {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                if (ret != TRANSLATE_PMP_FAIL) {
                    env->guest_phys_fault_addr = (im_address |
                                                  (address &
                                                   (TARGET_PAGE_SIZE - 1))) >> 2;
                }
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false,
                                   probe);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                           size, access_type, mode);
            tlb_size = pmp_get_tlb_size(env, pa);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size %" HWADDR_PRIu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error, two_stage_lookup,
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}
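
/*
 * Note on the permission combining above (an illustrative case): if the
 * VS-stage PTE grants PAGE_READ | PAGE_WRITE but the G-stage mapping of
 * the guest-physical page is read-only, 'prot &= prot2' leaves only
 * PAGE_READ in the TLB entry, so a later store misses the TLB, re-enters
 * this function and faults at the G-stage that denied it.
 */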

static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
     * be uncompressed. Quadrant 1 of the RVC instruction space need not
     * be transformed because these instructions won't generate any
     * load/store trap.
     */

    if ((insn & 0x3) != 0x3) {
        /* Transform 16-bit instruction into 32-bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSWSP_SDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear bit 1 of the transformed instruction to indicate that the
         * original instruction was a 16-bit instruction
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32-bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
            }
            break;
        default:
            break;
        }
    }

    if (access_size) {
        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
                               (access_size - 1));
    }

    return xinsn;
}
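
/*
 * Example of the transformation above (illustrative): a trapping C.LW is
 * rewritten as the 32-bit LW with the same destination and base registers,
 * a zeroed immediate and bit 1 cleared to flag the 16-bit origin, while
 * the rs1 position of the result carries taddr minus the computed
 * effective address, which is non-zero only for a misaligned access. This
 * follows the transformed-instruction encoding defined for mtinst/htinst
 * by the RISC-V privileged specification.
 */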

static target_ulong promote_load_fault(target_ulong orig_cause)
{
    switch (orig_cause) {
    case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;

    case RISCV_EXCP_LOAD_ACCESS_FAULT:
        return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;

    case RISCV_EXCP_LOAD_PAGE_FAULT:
        return RISCV_EXCP_STORE_PAGE_FAULT;
    }

    /* if no promotion, return original cause */
    return orig_cause;
}
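
/*
 * For example, a load that faults while RISCV_UW2_ALWAYS_STORE_AMO is
 * set (used below for accesses such as Zicfiss shadow stack reads, which
 * must report as stores) has its RISCV_EXCP_LOAD_PAGE_FAULT rewritten to
 * RISCV_EXCP_STORE_PAGE_FAULT.
 */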

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool virt = env->virt_enabled;
    bool write_gva = false;
    bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
    uint64_t s;

    /*
     * cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
                      !(env->mip & (1ULL << cause));
    bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
                       !(env->mip & (1ULL << cause));
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;
    int sxlen = 0;
    int mxlen = 0;

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
#ifdef CONFIG_TCG
        case RISCV_EXCP_SEMIHOST:
            do_common_semihosting(cs);
            env->pc += 4;
            return;
#endif
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            if (always_storeamo) {
                cause = promote_load_fault(cause);
            }
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in transformed instruction is
                 * non-zero only for misaligned access.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            tval = env->badaddr;
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        case RISCV_EXCP_SW_CHECK:
            tval = env->sw_check_code;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && env->virt_enabled) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !env->virt_enabled) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S && cause < 64 &&
        (((deleg >> cause) & 1) || s_injected || vs_injected)) {
        /* handle the trap in S-mode */
        /* save elp status */
        if (cpu_get_fcfien(env)) {
            env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
        }

        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->virt_enabled &&
                (((hdeleg >> cause) & 1) || vs_injected)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it's a VS-mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                              cause == IRQ_VS_EXT)) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (env->virt_enabled) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);

                htval = env->guest_phys_fault_addr;

                virt = false;
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        sxlen = 16 << riscv_cpu_sxl(env);
        env->scause = cause | ((target_ulong)async << (sxlen - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
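
        /*
         * Vectored dispatch, e.g. (illustrative): with stvec.MODE = 1 an
         * asynchronous supervisor timer interrupt (cause = 5) vectors to
         * stvec base + 5 * 4, while synchronous exceptions always enter
         * at the base address itself.
         */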
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S, virt);
    } else {
        /* handle the trap in M-mode */
        /* save elp status */
        if (cpu_get_fcfien(env)) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
        }

        if (riscv_has_ext(env, RVH)) {
            if (env->virt_enabled) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     env->virt_enabled);
            if (env->virt_enabled && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            virt = false;
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        mxlen = 16 << riscv_cpu_mxl(env);
        env->mcause = cause | ((target_ulong)async << (mxlen - 1));
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M, virt);
    }

    /*
     * Interrupt/exception/trap delivery is an asynchronous event and as per
     * the zicfilp spec the CPU should clear up the ELP state. No harm in
     * clearing unconditionally.
     */
    env->elp = false;

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
}

#endif /* !CONFIG_USER_ONLY */