1 /* 2 * RISC-V Emulation Helpers for QEMU. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * Copyright (c) 2022 VRULL GmbH 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms and conditions of the GNU General Public License, 10 * version 2 or later, as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * more details. 16 * 17 * You should have received a copy of the GNU General Public License along with 18 * this program. If not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "internals.h" 24 #include "exec/exec-all.h" 25 #include "exec/cputlb.h" 26 #include "exec/cpu_ldst.h" 27 #include "exec/helper-proto.h" 28 #include "trace.h" 29 30 /* Exceptions processing helpers */ 31 G_NORETURN void riscv_raise_exception(CPURISCVState *env, 32 RISCVException exception, 33 uintptr_t pc) 34 { 35 CPUState *cs = env_cpu(env); 36 37 trace_riscv_exception(exception, 38 riscv_cpu_get_trap_name(exception, false), 39 env->pc); 40 41 cs->exception_index = exception; 42 cpu_loop_exit_restore(cs, pc); 43 } 44 45 void helper_raise_exception(CPURISCVState *env, uint32_t exception) 46 { 47 riscv_raise_exception(env, exception, 0); 48 } 49 50 target_ulong helper_csrr(CPURISCVState *env, int csr) 51 { 52 /* 53 * The seed CSR must be accessed with a read-write instruction. A 54 * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/ 55 * CSRRCI with uimm=0 will raise an illegal instruction exception. 56 */ 57 if (csr == CSR_SEED) { 58 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 59 } 60 61 target_ulong val = 0; 62 RISCVException ret = riscv_csrr(env, csr, &val); 63 64 if (ret != RISCV_EXCP_NONE) { 65 riscv_raise_exception(env, ret, GETPC()); 66 } 67 return val; 68 } 69 70 void helper_csrw(CPURISCVState *env, int csr, target_ulong src) 71 { 72 target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1; 73 RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask); 74 75 if (ret != RISCV_EXCP_NONE) { 76 riscv_raise_exception(env, ret, GETPC()); 77 } 78 } 79 80 target_ulong helper_csrrw(CPURISCVState *env, int csr, 81 target_ulong src, target_ulong write_mask) 82 { 83 target_ulong val = 0; 84 RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask); 85 86 if (ret != RISCV_EXCP_NONE) { 87 riscv_raise_exception(env, ret, GETPC()); 88 } 89 return val; 90 } 91 92 target_ulong helper_csrr_i128(CPURISCVState *env, int csr) 93 { 94 Int128 rv = int128_zero(); 95 RISCVException ret = riscv_csrr_i128(env, csr, &rv); 96 97 if (ret != RISCV_EXCP_NONE) { 98 riscv_raise_exception(env, ret, GETPC()); 99 } 100 101 env->retxh = int128_gethi(rv); 102 return int128_getlo(rv); 103 } 104 105 void helper_csrw_i128(CPURISCVState *env, int csr, 106 target_ulong srcl, target_ulong srch) 107 { 108 RISCVException ret = riscv_csrrw_i128(env, csr, NULL, 109 int128_make128(srcl, srch), 110 UINT128_MAX); 111 112 if (ret != RISCV_EXCP_NONE) { 113 riscv_raise_exception(env, ret, GETPC()); 114 } 115 } 116 117 target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, 118 target_ulong srcl, target_ulong srch, 119 target_ulong maskl, target_ulong maskh) 120 { 121 Int128 rv = int128_zero(); 122 RISCVException ret = riscv_csrrw_i128(env, csr, &rv, 123 int128_make128(srcl, srch), 124 int128_make128(maskl, maskh)); 125 126 if (ret != RISCV_EXCP_NONE) { 127 riscv_raise_exception(env, ret, GETPC()); 128 } 129 130 env->retxh = int128_gethi(rv); 131 return int128_getlo(rv); 132 } 133 134 135 /* 136 * check_zicbo_envcfg 137 * 138 * Raise virtual exceptions and illegal instruction exceptions for 139 * Zicbo[mz] instructions based on the settings of [mhs]envcfg as 140 * specified in section 2.5.1 of the CMO specification. 141 */ 142 static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits, 143 uintptr_t ra) 144 { 145 #ifndef CONFIG_USER_ONLY 146 if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) { 147 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); 148 } 149 150 if (env->virt_enabled && 151 (((env->priv <= PRV_S) && !get_field(env->henvcfg, envbits)) || 152 ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) { 153 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra); 154 } 155 156 if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) { 157 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); 158 } 159 #endif 160 } 161 162 void helper_cbo_zero(CPURISCVState *env, target_ulong address) 163 { 164 RISCVCPU *cpu = env_archcpu(env); 165 uint16_t cbozlen = cpu->cfg.cboz_blocksize; 166 int mmu_idx = riscv_env_mmu_index(env, false); 167 uintptr_t ra = GETPC(); 168 void *mem; 169 170 check_zicbo_envcfg(env, MENVCFG_CBZE, ra); 171 172 /* Mask off low-bits to align-down to the cache-block. */ 173 address &= ~(cbozlen - 1); 174 175 /* 176 * cbo.zero requires MMU_DATA_STORE access. Do a probe_write() 177 * to raise any exceptions, including PMP. 178 */ 179 mem = probe_write(env, address, cbozlen, mmu_idx, ra); 180 181 if (likely(mem)) { 182 memset(mem, 0, cbozlen); 183 } else { 184 /* 185 * This means that we're dealing with an I/O page. Section 4.2 186 * of cmobase v1.0.1 says: 187 * 188 * "Cache-block zero instructions store zeros independently 189 * of whether data from the underlying memory locations are 190 * cacheable." 191 * 192 * Write zeros in address + cbozlen regardless of not being 193 * a RAM page. 194 */ 195 for (int i = 0; i < cbozlen; i++) { 196 cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra); 197 } 198 } 199 } 200 201 /* 202 * check_zicbom_access 203 * 204 * Check access permissions (LOAD, STORE or FETCH as specified in 205 * section 2.5.2 of the CMO specification) for Zicbom, raising 206 * either store page-fault (non-virtualized) or store guest-page 207 * fault (virtualized). 208 */ 209 static void check_zicbom_access(CPURISCVState *env, 210 target_ulong address, 211 uintptr_t ra) 212 { 213 RISCVCPU *cpu = env_archcpu(env); 214 int mmu_idx = riscv_env_mmu_index(env, false); 215 uint16_t cbomlen = cpu->cfg.cbom_blocksize; 216 void *phost; 217 int ret; 218 219 /* Mask off low-bits to align-down to the cache-block. */ 220 address &= ~(cbomlen - 1); 221 222 /* 223 * Section 2.5.2 of cmobase v1.0.1: 224 * 225 * "A cache-block management instruction is permitted to 226 * access the specified cache block whenever a load instruction 227 * or store instruction is permitted to access the corresponding 228 * physical addresses. If neither a load instruction nor store 229 * instruction is permitted to access the physical addresses, 230 * but an instruction fetch is permitted to access the physical 231 * addresses, whether a cache-block management instruction is 232 * permitted to access the cache block is UNSPECIFIED." 233 */ 234 ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD, 235 mmu_idx, true, &phost, ra); 236 if (ret != TLB_INVALID_MASK) { 237 /* Success: readable */ 238 return; 239 } 240 241 /* 242 * Since not readable, must be writable. On failure, store 243 * fault/store guest amo fault will be raised by 244 * riscv_cpu_tlb_fill(). PMP exceptions will be caught 245 * there as well. 246 */ 247 probe_write(env, address, cbomlen, mmu_idx, ra); 248 } 249 250 void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address) 251 { 252 uintptr_t ra = GETPC(); 253 check_zicbo_envcfg(env, MENVCFG_CBCFE, ra); 254 check_zicbom_access(env, address, ra); 255 256 /* We don't emulate the cache-hierarchy, so we're done. */ 257 } 258 259 void helper_cbo_inval(CPURISCVState *env, target_ulong address) 260 { 261 uintptr_t ra = GETPC(); 262 check_zicbo_envcfg(env, MENVCFG_CBIE, ra); 263 check_zicbom_access(env, address, ra); 264 265 /* We don't emulate the cache-hierarchy, so we're done. */ 266 } 267 268 #ifndef CONFIG_USER_ONLY 269 270 target_ulong helper_sret(CPURISCVState *env) 271 { 272 uint64_t mstatus; 273 target_ulong prev_priv, prev_virt = env->virt_enabled; 274 const target_ulong src_priv = env->priv; 275 const bool src_virt = env->virt_enabled; 276 277 if (!(env->priv >= PRV_S)) { 278 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 279 } 280 281 target_ulong retpc = env->sepc; 282 if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { 283 riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); 284 } 285 286 if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) { 287 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 288 } 289 290 if (env->virt_enabled && get_field(env->hstatus, HSTATUS_VTSR)) { 291 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); 292 } 293 294 mstatus = env->mstatus; 295 prev_priv = get_field(mstatus, MSTATUS_SPP); 296 mstatus = set_field(mstatus, MSTATUS_SIE, 297 get_field(mstatus, MSTATUS_SPIE)); 298 mstatus = set_field(mstatus, MSTATUS_SPIE, 1); 299 mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U); 300 301 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 302 if (riscv_has_ext(env, RVH)) { 303 target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) && 304 prev_priv == PRV_U; 305 /* Returning to VU from HS, vsstatus.sdt = 0 */ 306 if (!env->virt_enabled && prev_vu) { 307 env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0); 308 } 309 } 310 mstatus = set_field(mstatus, MSTATUS_SDT, 0); 311 } 312 if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) { 313 mstatus = set_field(mstatus, MSTATUS_MDT, 0); 314 } 315 if (env->priv_ver >= PRIV_VERSION_1_12_0) { 316 mstatus = set_field(mstatus, MSTATUS_MPRV, 0); 317 } 318 env->mstatus = mstatus; 319 320 if (riscv_has_ext(env, RVH) && !env->virt_enabled) { 321 /* We support Hypervisor extensions and virtulisation is disabled */ 322 target_ulong hstatus = env->hstatus; 323 324 prev_virt = get_field(hstatus, HSTATUS_SPV); 325 hstatus = set_field(hstatus, HSTATUS_SPV, 0); 326 327 env->hstatus = hstatus; 328 329 if (prev_virt) { 330 riscv_cpu_swap_hypervisor_regs(env); 331 } 332 } 333 334 riscv_cpu_set_mode(env, prev_priv, prev_virt); 335 336 /* 337 * If forward cfi enabled for new priv, restore elp status 338 * and clear spelp in mstatus 339 */ 340 if (cpu_get_fcfien(env)) { 341 env->elp = get_field(env->mstatus, MSTATUS_SPELP); 342 } 343 env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0); 344 345 if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) { 346 riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET, 347 src_priv, src_virt); 348 } 349 350 return retpc; 351 } 352 353 static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc, 354 target_ulong prev_priv) 355 { 356 if (!(env->priv >= PRV_M)) { 357 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 358 } 359 360 if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { 361 riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); 362 } 363 364 if (riscv_cpu_cfg(env)->pmp && 365 !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { 366 riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); 367 } 368 } 369 static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus, 370 target_ulong prev_priv, 371 target_ulong prev_virt) 372 { 373 /* If returning to U, VS or VU, sstatus.sdt = 0 */ 374 if (prev_priv == PRV_U || (prev_virt && 375 (prev_priv == PRV_S || prev_priv == PRV_U))) { 376 mstatus = set_field(mstatus, MSTATUS_SDT, 0); 377 /* If returning to VU, vsstatus.sdt = 0 */ 378 if (prev_virt && prev_priv == PRV_U) { 379 env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0); 380 } 381 } 382 383 return mstatus; 384 } 385 386 target_ulong helper_mret(CPURISCVState *env) 387 { 388 target_ulong retpc = env->mepc; 389 uint64_t mstatus = env->mstatus; 390 target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); 391 392 check_ret_from_m_mode(env, retpc, prev_priv); 393 394 target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) && 395 (prev_priv != PRV_M); 396 mstatus = set_field(mstatus, MSTATUS_MIE, 397 get_field(mstatus, MSTATUS_MPIE)); 398 mstatus = set_field(mstatus, MSTATUS_MPIE, 1); 399 mstatus = set_field(mstatus, MSTATUS_MPP, 400 riscv_has_ext(env, RVU) ? PRV_U : PRV_M); 401 mstatus = set_field(mstatus, MSTATUS_MPV, 0); 402 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 403 mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt); 404 } 405 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 406 mstatus = set_field(mstatus, MSTATUS_MDT, 0); 407 } 408 if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) { 409 mstatus = set_field(mstatus, MSTATUS_MPRV, 0); 410 } 411 env->mstatus = mstatus; 412 413 if (riscv_has_ext(env, RVH) && prev_virt) { 414 riscv_cpu_swap_hypervisor_regs(env); 415 } 416 417 riscv_cpu_set_mode(env, prev_priv, prev_virt); 418 /* 419 * If forward cfi enabled for new priv, restore elp status 420 * and clear mpelp in mstatus 421 */ 422 if (cpu_get_fcfien(env)) { 423 env->elp = get_field(env->mstatus, MSTATUS_MPELP); 424 } 425 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0); 426 427 if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) { 428 riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET, 429 PRV_M, false); 430 } 431 432 return retpc; 433 } 434 435 target_ulong helper_mnret(CPURISCVState *env) 436 { 437 target_ulong retpc = env->mnepc; 438 target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP); 439 target_ulong prev_virt; 440 441 check_ret_from_m_mode(env, retpc, prev_priv); 442 443 prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) && 444 (prev_priv != PRV_M); 445 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true); 446 447 /* 448 * If MNRET changes the privilege mode to a mode 449 * less privileged than M, it also sets mstatus.MPRV to 0. 450 */ 451 if (prev_priv < PRV_M) { 452 env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false); 453 } 454 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 455 env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt); 456 } 457 458 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 459 if (prev_priv < PRV_M) { 460 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0); 461 } 462 } 463 464 if (riscv_has_ext(env, RVH) && prev_virt) { 465 riscv_cpu_swap_hypervisor_regs(env); 466 } 467 468 riscv_cpu_set_mode(env, prev_priv, prev_virt); 469 470 /* 471 * If forward cfi enabled for new priv, restore elp status 472 * and clear mnpelp in mnstatus 473 */ 474 if (cpu_get_fcfien(env)) { 475 env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP); 476 } 477 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0); 478 479 return retpc; 480 } 481 482 void helper_ctr_add_entry(CPURISCVState *env, target_ulong src, 483 target_ulong dest, target_ulong type) 484 { 485 riscv_ctr_add_entry(env, src, dest, (enum CTRType)type, 486 env->priv, env->virt_enabled); 487 } 488 489 void helper_ctr_clear(CPURISCVState *env) 490 { 491 /* 492 * It's safe to call smstateen_acc_ok() for umode access regardless of the 493 * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit 494 * is zero, smstateen_acc_ok() will return the correct exception code and 495 * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that 496 * scenario the U-mode check below will handle that case. 497 */ 498 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR); 499 if (ret != RISCV_EXCP_NONE) { 500 riscv_raise_exception(env, ret, GETPC()); 501 } 502 503 if (env->priv == PRV_U) { 504 /* 505 * One corner case is when sctrclr is executed from VU-mode and 506 * mstateen.CTR = 0, in which case we are supposed to raise 507 * RISCV_EXCP_ILLEGAL_INST. This case is already handled in 508 * smstateen_acc_ok(). 509 */ 510 uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT : 511 RISCV_EXCP_ILLEGAL_INST; 512 riscv_raise_exception(env, excep, GETPC()); 513 } 514 515 riscv_ctr_clear(env); 516 } 517 518 void helper_wfi(CPURISCVState *env) 519 { 520 CPUState *cs = env_cpu(env); 521 bool rvs = riscv_has_ext(env, RVS); 522 bool prv_u = env->priv == PRV_U; 523 bool prv_s = env->priv == PRV_S; 524 525 if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) || 526 (rvs && prv_u && !env->virt_enabled)) { 527 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 528 } else if (env->virt_enabled && 529 (prv_u || (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) { 530 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); 531 } else { 532 cs->halted = 1; 533 cs->exception_index = EXCP_HLT; 534 cpu_loop_exit(cs); 535 } 536 } 537 538 void helper_wrs_nto(CPURISCVState *env) 539 { 540 if (env->virt_enabled && (env->priv == PRV_S || env->priv == PRV_U) && 541 get_field(env->hstatus, HSTATUS_VTW) && 542 !get_field(env->mstatus, MSTATUS_TW)) { 543 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); 544 } else if (env->priv != PRV_M && get_field(env->mstatus, MSTATUS_TW)) { 545 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 546 } 547 } 548 549 void helper_tlb_flush(CPURISCVState *env) 550 { 551 CPUState *cs = env_cpu(env); 552 if (!env->virt_enabled && 553 (env->priv == PRV_U || 554 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)))) { 555 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 556 } else if (env->virt_enabled && 557 (env->priv == PRV_U || get_field(env->hstatus, HSTATUS_VTVM))) { 558 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); 559 } else { 560 tlb_flush(cs); 561 } 562 } 563 564 void helper_tlb_flush_all(CPURISCVState *env) 565 { 566 CPUState *cs = env_cpu(env); 567 tlb_flush_all_cpus_synced(cs); 568 } 569 570 void helper_hyp_tlb_flush(CPURISCVState *env) 571 { 572 CPUState *cs = env_cpu(env); 573 574 if (env->virt_enabled) { 575 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); 576 } 577 578 if (env->priv == PRV_M || 579 (env->priv == PRV_S && !env->virt_enabled)) { 580 tlb_flush(cs); 581 return; 582 } 583 584 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 585 } 586 587 void helper_hyp_gvma_tlb_flush(CPURISCVState *env) 588 { 589 if (env->priv == PRV_S && !env->virt_enabled && 590 get_field(env->mstatus, MSTATUS_TVM)) { 591 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); 592 } 593 594 helper_hyp_tlb_flush(env); 595 } 596 597 static int check_access_hlsv(CPURISCVState *env, bool x, uintptr_t ra) 598 { 599 if (env->priv == PRV_M) { 600 /* always allowed */ 601 } else if (env->virt_enabled) { 602 riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra); 603 } else if (env->priv == PRV_U && !get_field(env->hstatus, HSTATUS_HU)) { 604 riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); 605 } 606 607 int mode = get_field(env->hstatus, HSTATUS_SPVP); 608 if (!x && mode == PRV_S && get_field(env->vsstatus, MSTATUS_SUM)) { 609 mode = MMUIdx_S_SUM; 610 } 611 return mode | MMU_2STAGE_BIT; 612 } 613 614 target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr) 615 { 616 uintptr_t ra = GETPC(); 617 int mmu_idx = check_access_hlsv(env, false, ra); 618 MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 619 620 return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra); 621 } 622 623 target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr) 624 { 625 uintptr_t ra = GETPC(); 626 int mmu_idx = check_access_hlsv(env, false, ra); 627 MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); 628 629 return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra); 630 } 631 632 target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr) 633 { 634 uintptr_t ra = GETPC(); 635 int mmu_idx = check_access_hlsv(env, false, ra); 636 MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); 637 638 return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra); 639 } 640 641 target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr) 642 { 643 uintptr_t ra = GETPC(); 644 int mmu_idx = check_access_hlsv(env, false, ra); 645 MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); 646 647 return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra); 648 } 649 650 void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val) 651 { 652 uintptr_t ra = GETPC(); 653 int mmu_idx = check_access_hlsv(env, false, ra); 654 MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 655 656 cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); 657 } 658 659 void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val) 660 { 661 uintptr_t ra = GETPC(); 662 int mmu_idx = check_access_hlsv(env, false, ra); 663 MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); 664 665 cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); 666 } 667 668 void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val) 669 { 670 uintptr_t ra = GETPC(); 671 int mmu_idx = check_access_hlsv(env, false, ra); 672 MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); 673 674 cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); 675 } 676 677 void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val) 678 { 679 uintptr_t ra = GETPC(); 680 int mmu_idx = check_access_hlsv(env, false, ra); 681 MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); 682 683 cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); 684 } 685 686 /* 687 * TODO: These implementations are not quite correct. They perform the 688 * access using execute permission just fine, but the final PMP check 689 * is supposed to have read permission as well. Without replicating 690 * a fair fraction of cputlb.c, fixing this requires adding new mmu_idx 691 * which would imply that exact check in tlb_fill. 692 */ 693 target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong addr) 694 { 695 uintptr_t ra = GETPC(); 696 int mmu_idx = check_access_hlsv(env, true, ra); 697 MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); 698 699 return cpu_ldw_code_mmu(env, addr, oi, GETPC()); 700 } 701 702 target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong addr) 703 { 704 uintptr_t ra = GETPC(); 705 int mmu_idx = check_access_hlsv(env, true, ra); 706 MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); 707 708 return cpu_ldl_code_mmu(env, addr, oi, ra); 709 } 710 711 #endif /* !CONFIG_USER_ONLY */ 712