xref: /qemu/target/riscv/cpu_helper.c (revision f07a5674cf97b8473e5d06d7b1df9b51e97d553f)
1 /*
2  * RISC-V CPU helpers for qemu.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/exec-all.h"
27 #include "exec/page-protection.h"
28 #include "instmap.h"
29 #include "tcg/tcg-op.h"
30 #include "hw/core/tcg-cpu-ops.h"
31 #include "trace.h"
32 #include "semihosting/common-semi.h"
33 #include "system/cpu-timers.h"
34 #include "cpu_bits.h"
35 #include "debug.h"
36 #include "pmp.h"
37 
38 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
39 {
40 #ifdef CONFIG_USER_ONLY
41     return 0;
42 #else
43     bool virt = env->virt_enabled;
44     int mode = env->priv;
45 
46     /* All priv -> mmu_idx mappings are here */
47     if (!ifetch) {
48         uint64_t status = env->mstatus;
49 
50         if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
51             mode = get_field(env->mstatus, MSTATUS_MPP);
52             virt = get_field(env->mstatus, MSTATUS_MPV) &&
53                    (mode != PRV_M);
54             if (virt) {
55                 status = env->vsstatus;
56             }
57         }
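        /*
         * For data accesses with SUM set in the effective (possibly VS-level)
         * status, S-mode uses the dedicated MMUIdx_S_SUM index so that
         * U-accessible pages become reachable.
         */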
58         if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
59             mode = MMUIdx_S_SUM;
60         }
61     }
62 
63     return mode | (virt ? MMU_2STAGE_BIT : 0);
64 #endif
65 }
66 
67 bool cpu_get_fcfien(CPURISCVState *env)
68 {
69     /* no cfi extension, return false */
70     if (!env_archcpu(env)->cfg.ext_zicfilp) {
71         return false;
72     }
73 
74     switch (env->priv) {
75     case PRV_U:
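        /*
         * With S-mode present, U-mode forward CFI is governed by senvcfg.LPE;
         * without S-mode, menvcfg.LPE applies to U-mode directly.
         */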
76         if (riscv_has_ext(env, RVS)) {
77             return env->senvcfg & SENVCFG_LPE;
78         }
79         return env->menvcfg & MENVCFG_LPE;
80 #ifndef CONFIG_USER_ONLY
81     case PRV_S:
82         if (env->virt_enabled) {
83             return env->henvcfg & HENVCFG_LPE;
84         }
85         return env->menvcfg & MENVCFG_LPE;
86     case PRV_M:
87         return env->mseccfg & MSECCFG_MLPE;
88 #endif
89     default:
90         g_assert_not_reached();
91     }
92 }
93 
94 bool cpu_get_bcfien(CPURISCVState *env)
95 {
96     /* no cfi extension, return false */
97     if (!env_archcpu(env)->cfg.ext_zicfiss) {
98         return false;
99     }
100 
101     switch (env->priv) {
102     case PRV_U:
103         /*
104          * If S is not implemented then the shadow stack for U can't be turned
105          * on. This is checked in `riscv_cpu_validate_set_extensions`, so there
106          * is no need to check or assert here.
107          */
108         return env->senvcfg & SENVCFG_SSE;
109 #ifndef CONFIG_USER_ONLY
110     case PRV_S:
111         if (env->virt_enabled) {
112             return env->henvcfg & HENVCFG_SSE;
113         }
114         return env->menvcfg & MENVCFG_SSE;
115     case PRV_M: /* M-mode shadow stack is always off */
116         return false;
117 #endif
118     default:
119         g_assert_not_reached();
120     }
121 }
122 
123 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
124 {
125 #ifdef CONFIG_USER_ONLY
126     return false;
127 #else
128     if (virt) {
129         return (env->henvcfg & HENVCFG_DTE) != 0;
130     } else {
131         return (env->menvcfg & MENVCFG_DTE) != 0;
132     }
133 #endif
134 }
135 
136 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
137                           uint64_t *cs_base, uint32_t *pflags)
138 {
139     RISCVCPU *cpu = env_archcpu(env);
140     RISCVExtStatus fs, vs;
141     uint32_t flags = 0;
142     bool pm_signext = riscv_cpu_virt_mem_enabled(env);
143 
144     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
145     *cs_base = 0;
146 
147     if (cpu->cfg.ext_zve32x) {
148         /*
149          * If env->vl equals VLMAX, we can use the generic vector operation
150          * expanders (GVEC) to accelerate the vector operations.
151          * However, as LMUL can be a fractional number, the maximum vector
152          * size that can be operated on might be less than 8 bytes, which is
153          * not supported by GVEC. So we set the vl_eq_vlmax flag to true
154          * only when maxsz >= 8 bytes.
155          */
156 
157         /* lmul encoded as in DisasContext::lmul */
158         int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
159         uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
160         uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
161         uint32_t maxsz = vlmax << vsew;
162         bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
163                            (maxsz >= 8);
164         flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
165         flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
166         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
167                            FIELD_EX64(env->vtype, VTYPE, VLMUL));
168         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
169         flags = FIELD_DP32(flags, TB_FLAGS, VTA,
170                            FIELD_EX64(env->vtype, VTYPE, VTA));
171         flags = FIELD_DP32(flags, TB_FLAGS, VMA,
172                            FIELD_EX64(env->vtype, VTYPE, VMA));
173         flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
174     } else {
175         flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
176     }
177 
178     if (cpu_get_fcfien(env)) {
179         /*
180          * For Forward CFI, only the expectation of a lpad at
181          * the start of the block is tracked via env->elp. env->elp
182          * is turned on during jalr translation.
183          */
184         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
185         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
186     }
187 
188     if (cpu_get_bcfien(env)) {
189         flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
190     }
191 
192 #ifdef CONFIG_USER_ONLY
193     fs = EXT_STATUS_DIRTY;
194     vs = EXT_STATUS_DIRTY;
195 #else
196     flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
197 
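    /*
     * riscv_env_mmu_index() is OR'ed directly into flags; this relies on the
     * memory-index field occupying the low bits of TB_FLAGS.
     */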
198     flags |= riscv_env_mmu_index(env, 0);
199     fs = get_field(env->mstatus, MSTATUS_FS);
200     vs = get_field(env->mstatus, MSTATUS_VS);
201 
202     if (env->virt_enabled) {
203         flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
204         /*
205          * Merge DISABLED and !DIRTY states using MIN.
206          * We will set both fields when dirtying.
207          */
208         fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
209         vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
210     }
211 
212     /* With Zfinx, floating point is enabled/disabled by Smstateen. */
213     if (!riscv_has_ext(env, RVF)) {
214         fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
215              ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
216     }
217 
218     if (cpu->cfg.debug && !icount_enabled()) {
219         flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
220     }
221 #endif
222 
223     flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
224     flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
225     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
226     flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
227     flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
228     flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
229 
230     *pflags = flags;
231 }
232 
233 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
234 {
235 #ifndef CONFIG_USER_ONLY
236     int priv_mode = cpu_address_mode(env);
237 
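    /* Pointer masking is treated as disabled when both MPRV and MXR are set. */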
238     if (get_field(env->mstatus, MSTATUS_MPRV) &&
239         get_field(env->mstatus, MSTATUS_MXR)) {
240         return PMM_FIELD_DISABLED;
241     }
242 
243     /* Get current PMM field */
244     switch (priv_mode) {
245     case PRV_M:
246         if (riscv_cpu_cfg(env)->ext_smmpm) {
247             return get_field(env->mseccfg, MSECCFG_PMM);
248         }
249         break;
250     case PRV_S:
251         if (riscv_cpu_cfg(env)->ext_smnpm) {
252             if (get_field(env->mstatus, MSTATUS_MPV)) {
253                 return get_field(env->henvcfg, HENVCFG_PMM);
254             } else {
255                 return get_field(env->menvcfg, MENVCFG_PMM);
256             }
257         }
258         break;
259     case PRV_U:
260         if (riscv_has_ext(env, RVS)) {
261             if (riscv_cpu_cfg(env)->ext_ssnpm) {
262                 return get_field(env->senvcfg, SENVCFG_PMM);
263             }
264         } else {
265             if (riscv_cpu_cfg(env)->ext_smnpm) {
266                 return get_field(env->menvcfg, MENVCFG_PMM);
267             }
268         }
269         break;
270     default:
271         g_assert_not_reached();
272     }
273     return PMM_FIELD_DISABLED;
274 #else
275     return PMM_FIELD_DISABLED;
276 #endif
277 }
278 
279 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
280 {
281 #ifndef CONFIG_USER_ONLY
282     int priv_mode = cpu_address_mode(env);
283 
284     if (priv_mode == PRV_U) {
285         return get_field(env->hstatus, HSTATUS_HUPMM);
286     } else {
287         if (get_field(env->hstatus, HSTATUS_SPVP)) {
288             return get_field(env->henvcfg, HENVCFG_PMM);
289         } else {
290             return get_field(env->senvcfg, SENVCFG_PMM);
291         }
292     }
293 #else
294     return PMM_FIELD_DISABLED;
295 #endif
296 }
297 
298 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
299 {
300 #ifndef CONFIG_USER_ONLY
301     int satp_mode = 0;
302     int priv_mode = cpu_address_mode(env);
303 
304     if (riscv_cpu_mxl(env) == MXL_RV32) {
305         satp_mode = get_field(env->satp, SATP32_MODE);
306     } else {
307         satp_mode = get_field(env->satp, SATP64_MODE);
308     }
309 
310     return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
311 #else
312     return false;
313 #endif
314 }
315 
316 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
317 {
318     switch (pmm) {
319     case PMM_FIELD_DISABLED:
320         return 0;
321     case PMM_FIELD_PMLEN7:
322         return 7;
323     case PMM_FIELD_PMLEN16:
324         return 16;
325     default:
326         g_assert_not_reached();
327     }
328 }
329 
330 #ifndef CONFIG_USER_ONLY
331 
332 /*
333  * The HS-mode is allowed to configure priority only for the
334  * following VS-mode local interrupts:
335  *
336  * 0  (Reserved interrupt, reads as zero)
337  * 1  Supervisor software interrupt
338  * 4  (Reserved interrupt, reads as zero)
339  * 5  Supervisor timer interrupt
340  * 8  (Reserved interrupt, reads as zero)
341  * 13 (Reserved interrupt)
342  * 14 "
343  * 15 "
344  * 16 "
345  * 17 "
346  * 18 "
347  * 19 "
348  * 20 "
349  * 21 "
350  * 22 "
351  * 23 "
352  */
353 
354 static const int hviprio_index2irq[] = {
355     0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
356 static const int hviprio_index2rdzero[] = {
357     1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
358 
359 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
360 {
361     if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
362         return -EINVAL;
363     }
364 
365     if (out_irq) {
366         *out_irq = hviprio_index2irq[index];
367     }
368 
369     if (out_rdzero) {
370         *out_rdzero = hviprio_index2rdzero[index];
371     }
372 
373     return 0;
374 }
375 
376 /*
377  * Default priorities of local interrupts are defined in the
378  * RISC-V Advanced Interrupt Architecture specification.
379  *
380  * ----------------------------------------------------------------
381  *  Default  |
382  *  Priority | Major Interrupt Numbers
383  * ----------------------------------------------------------------
384  *  Highest  | 47, 23, 46, 45, 22, 44,
385  *           | 43, 21, 42, 41, 20, 40
386  *           |
387  *           | 11 (0b),  3 (03),  7 (07)
388  *           |  9 (09),  1 (01),  5 (05)
389  *           | 12 (0c)
390  *           | 10 (0a),  2 (02),  6 (06)
391  *           |
392  *           | 39, 19, 38, 37, 18, 36,
393  *  Lowest   | 35, 17, 34, 33, 16, 32
394  * ----------------------------------------------------------------
395  */
396 static const uint8_t default_iprio[64] = {
397     /* Custom interrupts 48 to 63 */
398     [63] = IPRIO_MMAXIPRIO,
399     [62] = IPRIO_MMAXIPRIO,
400     [61] = IPRIO_MMAXIPRIO,
401     [60] = IPRIO_MMAXIPRIO,
402     [59] = IPRIO_MMAXIPRIO,
403     [58] = IPRIO_MMAXIPRIO,
404     [57] = IPRIO_MMAXIPRIO,
405     [56] = IPRIO_MMAXIPRIO,
406     [55] = IPRIO_MMAXIPRIO,
407     [54] = IPRIO_MMAXIPRIO,
408     [53] = IPRIO_MMAXIPRIO,
409     [52] = IPRIO_MMAXIPRIO,
410     [51] = IPRIO_MMAXIPRIO,
411     [50] = IPRIO_MMAXIPRIO,
412     [49] = IPRIO_MMAXIPRIO,
413     [48] = IPRIO_MMAXIPRIO,
414 
415     /* Custom interrupts 24 to 31 */
416     [31] = IPRIO_MMAXIPRIO,
417     [30] = IPRIO_MMAXIPRIO,
418     [29] = IPRIO_MMAXIPRIO,
419     [28] = IPRIO_MMAXIPRIO,
420     [27] = IPRIO_MMAXIPRIO,
421     [26] = IPRIO_MMAXIPRIO,
422     [25] = IPRIO_MMAXIPRIO,
423     [24] = IPRIO_MMAXIPRIO,
424 
425     [47] = IPRIO_DEFAULT_UPPER,
426     [23] = IPRIO_DEFAULT_UPPER + 1,
427     [46] = IPRIO_DEFAULT_UPPER + 2,
428     [45] = IPRIO_DEFAULT_UPPER + 3,
429     [22] = IPRIO_DEFAULT_UPPER + 4,
430     [44] = IPRIO_DEFAULT_UPPER + 5,
431 
432     [43] = IPRIO_DEFAULT_UPPER + 6,
433     [21] = IPRIO_DEFAULT_UPPER + 7,
434     [42] = IPRIO_DEFAULT_UPPER + 8,
435     [41] = IPRIO_DEFAULT_UPPER + 9,
436     [20] = IPRIO_DEFAULT_UPPER + 10,
437     [40] = IPRIO_DEFAULT_UPPER + 11,
438 
439     [11] = IPRIO_DEFAULT_M,
440     [3]  = IPRIO_DEFAULT_M + 1,
441     [7]  = IPRIO_DEFAULT_M + 2,
442 
443     [9]  = IPRIO_DEFAULT_S,
444     [1]  = IPRIO_DEFAULT_S + 1,
445     [5]  = IPRIO_DEFAULT_S + 2,
446 
447     [12] = IPRIO_DEFAULT_SGEXT,
448 
449     [10] = IPRIO_DEFAULT_VS,
450     [2]  = IPRIO_DEFAULT_VS + 1,
451     [6]  = IPRIO_DEFAULT_VS + 2,
452 
453     [39] = IPRIO_DEFAULT_LOWER,
454     [19] = IPRIO_DEFAULT_LOWER + 1,
455     [38] = IPRIO_DEFAULT_LOWER + 2,
456     [37] = IPRIO_DEFAULT_LOWER + 3,
457     [18] = IPRIO_DEFAULT_LOWER + 4,
458     [36] = IPRIO_DEFAULT_LOWER + 5,
459 
460     [35] = IPRIO_DEFAULT_LOWER + 6,
461     [17] = IPRIO_DEFAULT_LOWER + 7,
462     [34] = IPRIO_DEFAULT_LOWER + 8,
463     [33] = IPRIO_DEFAULT_LOWER + 9,
464     [16] = IPRIO_DEFAULT_LOWER + 10,
465     [32] = IPRIO_DEFAULT_LOWER + 11,
466 };
467 
468 uint8_t riscv_cpu_default_priority(int irq)
469 {
470     if (irq < 0 || irq > 63) {
471         return IPRIO_MMAXIPRIO;
472     }
473 
474     return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
475 };
476 
477 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
478                                     int extirq, unsigned int extirq_def_prio,
479                                     uint64_t pending, uint8_t *iprio)
480 {
481     int irq, best_irq = RISCV_EXCP_NONE;
482     unsigned int prio, best_prio = UINT_MAX;
483 
484     if (!pending) {
485         return RISCV_EXCP_NONE;
486     }
487 
488     irq = ctz64(pending);
489     if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
490                                   riscv_cpu_cfg(env)->ext_ssaia)) {
491         return irq;
492     }
493 
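    /*
     * With AIA, scan every pending bit and pick the interrupt with the best
     * (numerically lowest) priority. An IRQ with no explicit iprio value uses
     * the external interrupt's default priority if it is the external IRQ;
     * otherwise it collapses to highest or lowest priority depending on how
     * its AIA default priority compares to the external interrupt's.
     */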
494     pending = pending >> irq;
495     while (pending) {
496         prio = iprio[irq];
497         if (!prio) {
498             if (irq == extirq) {
499                 prio = extirq_def_prio;
500             } else {
501                 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
502                        1 : IPRIO_MMAXIPRIO;
503             }
504         }
505         if ((pending & 0x1) && (prio <= best_prio)) {
506             best_irq = irq;
507             best_prio = prio;
508         }
509         irq++;
510         pending = pending >> 1;
511     }
512 
513     return best_irq;
514 }
515 
516 /*
517  * Doesn't report interrupts inserted using mvip from M-mode firmware or
518  * using hvip bits 13:63 from HS-mode. Those are returned by
519  * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
520  */
521 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
522 {
523     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
524     uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
525     uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
526 
527     return (env->mip | vsgein | vstip) & env->mie;
528 }
529 
530 int riscv_cpu_mirq_pending(CPURISCVState *env)
531 {
532     uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
533                     ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
534 
535     return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
536                                     irqs, env->miprio);
537 }
538 
539 int riscv_cpu_sirq_pending(CPURISCVState *env)
540 {
541     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
542                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
543     uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
544 
545     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
546                                     irqs | irqs_f, env->siprio);
547 }
548 
549 int riscv_cpu_vsirq_pending(CPURISCVState *env)
550 {
551     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
552     uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
553     uint64_t vsbits;
554 
555     /* Bring VS-level bits to correct position */
556     vsbits = irqs & VS_MODE_INTERRUPTS;
557     irqs &= ~VS_MODE_INTERRUPTS;
558     irqs |= vsbits >> 1;
559 
560     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
561                                     (irqs | irqs_f_vs), env->hviprio);
562 }
563 
564 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
565 {
566     uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
567     uint64_t vsbits, irq_delegated;
568     int virq;
569 
570     /* Priority: RNMI > Other interrupt. */
571     if (riscv_cpu_cfg(env)->ext_smrnmi) {
572         /* If mnstatus.NMIE == 0, all interrupts are disabled. */
573         if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
574             return RISCV_EXCP_NONE;
575         }
576 
577         if (env->rnmip) {
578             return ctz64(env->rnmip); /* since non-zero */
579         }
580     }
581 
582     /* Determine interrupt enable state of all privilege modes */
583     if (env->virt_enabled) {
584         mie = 1;
585         hsie = 1;
586         vsie = (env->priv < PRV_S) ||
587                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
588     } else {
589         mie = (env->priv < PRV_M) ||
590               (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
591         hsie = (env->priv < PRV_S) ||
592                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
593         vsie = 0;
594     }
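    /*
     * mie, hsie and vsie are 0/1 flags; negating them below (e.g. -mie)
     * yields an all-ones or all-zeroes mask for the corresponding checks.
     */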
595 
596     /* Determine all pending interrupts */
597     pending = riscv_cpu_all_pending(env);
598 
599     /* Check M-mode interrupts */
600     irqs = pending & ~env->mideleg & -mie;
601     if (irqs) {
602         return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
603                                         irqs, env->miprio);
604     }
605 
606     /* Check for virtual S-mode interrupts. */
607     irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
608 
609     /* Check HS-mode interrupts */
610     irqs =  ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
611     if (irqs) {
612         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
613                                         irqs, env->siprio);
614     }
615 
616     /* Check for virtual VS-mode interrupts. */
617     irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
618 
619     /* Check VS-mode interrupts */
620     irq_delegated = pending & env->mideleg & env->hideleg;
621 
622     /* Bring VS-level bits to correct position */
623     vsbits = irq_delegated & VS_MODE_INTERRUPTS;
624     irq_delegated &= ~VS_MODE_INTERRUPTS;
625     irq_delegated |= vsbits >> 1;
626 
627     irqs = (irq_delegated | irqs_f_vs) & -vsie;
628     if (irqs) {
629         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
630                                         irqs, env->hviprio);
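        /*
         * VS-level bits were shifted down by one before the search, so map
         * standard S-mode IRQ numbers (1..12) back to their VS counterparts
         * by adding one; errors and local/custom IRQs are returned unchanged.
         */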
631         if (virq <= 0 || (virq > 12 && virq <= 63)) {
632             return virq;
633         } else {
634             return virq + 1;
635         }
636     }
637 
638     /* Indicate no pending interrupt */
639     return RISCV_EXCP_NONE;
640 }
641 
642 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
643 {
644     uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
645 
646     if (interrupt_request & mask) {
647         RISCVCPU *cpu = RISCV_CPU(cs);
648         CPURISCVState *env = &cpu->env;
649         int interruptno = riscv_cpu_local_irq_pending(env);
650         if (interruptno >= 0) {
651             cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
652             riscv_cpu_do_interrupt(cs);
653             return true;
654         }
655     }
656     return false;
657 }
658 
659 /* Return true if floating point support is currently enabled */
660 bool riscv_cpu_fp_enabled(CPURISCVState *env)
661 {
662     if (env->mstatus & MSTATUS_FS) {
663         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
664             return false;
665         }
666         return true;
667     }
668 
669     return false;
670 }
671 
672 /* Return true if vector support is currently enabled */
673 bool riscv_cpu_vector_enabled(CPURISCVState *env)
674 {
675     if (env->mstatus & MSTATUS_VS) {
676         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
677             return false;
678         }
679         return true;
680     }
681 
682     return false;
683 }
684 
685 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
686 {
687     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
688                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
689                             MSTATUS64_UXL | MSTATUS_VS;
690 
691     if (riscv_has_ext(env, RVF)) {
692         mstatus_mask |= MSTATUS_FS;
693     }
694     bool current_virt = env->virt_enabled;
695 
696     /*
697      * If the zicfilp extension is available and henvcfg.LPE = 1,
698      * then include SPELP in the mstatus mask.
699      */
700     if (env_archcpu(env)->cfg.ext_zicfilp &&
701         get_field(env->henvcfg, HENVCFG_LPE)) {
702         mstatus_mask |= SSTATUS_SPELP;
703     }
704 
705     g_assert(riscv_has_ext(env, RVH));
706 
707     if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
708         mstatus_mask |= MSTATUS_SDT;
709     }
710 
711     if (current_virt) {
712         /* Current V=1 and we are about to change to V=0 */
713         env->vsstatus = env->mstatus & mstatus_mask;
714         env->mstatus &= ~mstatus_mask;
715         env->mstatus |= env->mstatus_hs;
716 
717         env->vstvec = env->stvec;
718         env->stvec = env->stvec_hs;
719 
720         env->vsscratch = env->sscratch;
721         env->sscratch = env->sscratch_hs;
722 
723         env->vsepc = env->sepc;
724         env->sepc = env->sepc_hs;
725 
726         env->vscause = env->scause;
727         env->scause = env->scause_hs;
728 
729         env->vstval = env->stval;
730         env->stval = env->stval_hs;
731 
732         env->vsatp = env->satp;
733         env->satp = env->satp_hs;
734     } else {
735         /* Current V=0 and we are about to change to V=1 */
736         env->mstatus_hs = env->mstatus & mstatus_mask;
737         env->mstatus &= ~mstatus_mask;
738         env->mstatus |= env->vsstatus;
739 
740         env->stvec_hs = env->stvec;
741         env->stvec = env->vstvec;
742 
743         env->sscratch_hs = env->sscratch;
744         env->sscratch = env->vsscratch;
745 
746         env->sepc_hs = env->sepc;
747         env->sepc = env->vsepc;
748 
749         env->scause_hs = env->scause;
750         env->scause = env->vscause;
751 
752         env->stval_hs = env->stval;
753         env->stval = env->vstval;
754 
755         env->satp_hs = env->satp;
756         env->satp = env->vsatp;
757     }
758 }
759 
760 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
761 {
762     if (!riscv_has_ext(env, RVH)) {
763         return 0;
764     }
765 
766     return env->geilen;
767 }
768 
769 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
770 {
771     if (!riscv_has_ext(env, RVH)) {
772         return;
773     }
774 
775     if (geilen > (TARGET_LONG_BITS - 1)) {
776         return;
777     }
778 
779     env->geilen = geilen;
780 }
781 
782 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
783 {
784     CPURISCVState *env = &cpu->env;
785     CPUState *cs = CPU(cpu);
786     bool release_lock = false;
787 
788     if (!bql_locked()) {
789         release_lock = true;
790         bql_lock();
791     }
792 
793     if (level) {
794         env->rnmip |= 1 << irq;
795         cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
796     } else {
797         env->rnmip &= ~(1 << irq);
798         cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
799     }
800 
801     if (release_lock) {
802         bql_unlock();
803     }
804 }
805 
806 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
807 {
808     CPURISCVState *env = &cpu->env;
809     if (env->miclaim & interrupts) {
810         return -1;
811     } else {
812         env->miclaim |= interrupts;
813         return 0;
814     }
815 }
816 
817 void riscv_cpu_interrupt(CPURISCVState *env)
818 {
819     uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
820     CPUState *cs = env_cpu(env);
821 
822     BQL_LOCK_GUARD();
823 
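    /*
     * irqf gathers interrupts injected via mvip (towards S-mode) or hvip
     * (towards VS-mode) that are enabled by mvien/hvien and the corresponding
     * sie/vsie bits.
     */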
824     if (env->virt_enabled) {
825         gein = get_field(env->hstatus, HSTATUS_VGEIN);
826         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
827         irqf = env->hvien & env->hvip & env->vsie;
828     } else {
829         irqf = env->mvien & env->mvip & env->sie;
830     }
831 
832     vstip = env->vstime_irq ? MIP_VSTIP : 0;
833 
834     if (env->mip | vsgein | vstip | irqf) {
835         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
836     } else {
837         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
838     }
839 }
840 
841 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
842 {
843     uint64_t old = env->mip;
844 
845     /* No need to update mip for VSTIP */
846     mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
847 
848     BQL_LOCK_GUARD();
849 
850     env->mip = (env->mip & ~mask) | (value & mask);
851 
852     riscv_cpu_interrupt(env);
853 
854     return old;
855 }
856 
857 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
858                              void *arg)
859 {
860     env->rdtime_fn = fn;
861     env->rdtime_fn_arg = arg;
862 }
863 
864 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
865                                    int (*rmw_fn)(void *arg,
866                                                  target_ulong reg,
867                                                  target_ulong *val,
868                                                  target_ulong new_val,
869                                                  target_ulong write_mask),
870                                    void *rmw_fn_arg)
871 {
872     if (priv <= PRV_M) {
873         env->aia_ireg_rmw_fn[priv] = rmw_fn;
874         env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
875     }
876 }
877 
878 static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
879                              bool virt)
880 {
881     uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
882 
883     assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
884 
885     if (ctl & freeze_mask) {
886         env->sctrstatus |= SCTRSTATUS_FROZEN;
887     }
888 }
889 
890 void riscv_ctr_clear(CPURISCVState *env)
891 {
892     memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
893     memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
894     memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
895 }
896 
897 static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
898 {
899     switch (priv) {
900     case PRV_M:
901         return MCTRCTL_M;
902     case PRV_S:
903         if (virt) {
904             return XCTRCTL_S;
905         }
906         return XCTRCTL_S;
907     case PRV_U:
908         if (virt) {
909             return XCTRCTL_U;
910         }
911         return XCTRCTL_U;
912     }
913 
914     g_assert_not_reached();
915 }
916 
917 static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
918                                       bool virt)
919 {
920     switch (priv) {
921     case PRV_M:
922         return env->mctrctl;
923     case PRV_S:
924     case PRV_U:
925         if (virt) {
926             return env->vsctrctl;
927         }
928         return env->mctrctl;
929     }
930 
931     g_assert_not_reached();
932 }
933 
934 /*
935  * This function assumes that the src privilege and target privilege are not
936  * the same and that the src privilege is less than the target privilege.
937  * This includes the virtual state as well.
938  */
939 static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
940                                 bool src_virt)
941 {
942     target_long tgt_prv = env->priv;
943     bool res = true;
944 
945     /*
946      * VS and U mode are the same in terms of the xTE bits required to record
947      * an external trap. See 6.1.2 External Traps, table 8 External Trap Enable
948      * Requirements. This changes VS to U to simplify the logic a bit.
949      */
950     if (src_virt && src_prv == PRV_S) {
951         src_prv = PRV_U;
952     } else if (env->virt_enabled && tgt_prv == PRV_S) {
953         tgt_prv = PRV_U;
954     }
955 
956     /* VU mode is an outlier here. */
957     if (src_virt && src_prv == PRV_U) {
958         res &= !!(env->vsctrctl & XCTRCTL_STE);
959     }
960 
961     switch (src_prv) {
962     case PRV_U:
963         if (tgt_prv == PRV_U) {
964             break;
965         }
966         res &= !!(env->mctrctl & XCTRCTL_STE);
967         /* fall-through */
968     case PRV_S:
969         if (tgt_prv == PRV_S) {
970             break;
971         }
972         res &= !!(env->mctrctl & MCTRCTL_MTE);
973         /* fall-through */
974     case PRV_M:
975         break;
976     }
977 
978     return res;
979 }
980 
981 /*
982  * Special cases for traps and trap returns:
983  *
984  * 1- Traps, and trap returns, between enabled modes are recorded as normal.
985  * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
986  * enabled mode back to an inhibited mode, are partially recorded.  In such
987  * cases, the PC from the inhibited mode (source PC for traps, and target PC
988  * for trap returns) is 0.
989  *
990  * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
991  * Traps from an enabled mode to an inhibited mode, known as external traps,
992  * receive special handling.
993  * By default external traps are not recorded, but a handshake mechanism exists
994  * to allow partial recording.  Software running in the target mode of the trap
995  * can opt in to allowing CTR to record traps into that mode even when the mode
996  * is inhibited.  The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
997  * respectively, to opt in. When an external trap occurs and xTE=1, where x is
998  * the target privilege mode of the trap, CTR will record the trap. In such
999  * cases, the target PC is 0.
1000  */
1001 /*
1002  * CTR arrays are implemented as circular buffers and new entry is stored at
1003  * sctrstatus.WRPTR, but they are presented to software as moving circular
1004  * buffers. This means software gets the illusion that whenever a new entry
1005  * is added, the whole buffer is moved by one place, the new entry is added
1006  * at the start at idx 0, and the older entries follow.
1007  *
1008  * Depth = 16.
1009  *
1010  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1011  * WRPTR                                   W
1012  * entry   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8
1013  *
1014  * When a new entry is added:
1015  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1016  * WRPTR                                       W
1017  * entry   8   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9
1018  *
1019  * "entry" here denotes the logical entry number that software can access
1020  * using the ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
1021  * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
1022  * buffer[7]. Here is how we convert an entry to a buffer idx:
1023  *
1024  *    entry = isel - CTR_ENTRIES_FIRST;
1025  *    idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
1026  */
1027 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
1028     enum CTRType type, target_ulong src_priv, bool src_virt)
1029 {
1030     bool tgt_virt = env->virt_enabled;
1031     uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
1032     uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
1033     uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
1034     uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
1035     uint64_t depth, head;
1036     bool ext_trap = false;
1037 
1038     /*
1039      * Return immediately if both target and src recording are disabled or if
1040      * CTR is in the frozen state.
1041      */
1042     if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
1043         env->sctrstatus & SCTRSTATUS_FROZEN) {
1044         return;
1045     }
1046 
1047     /*
1048      * With RAS emulation enabled, only allow indirect call, direct call,
1049      * function return and co-routine swap types.
1050      */
1051     if (tgt_ctrl & XCTRCTL_RASEMU &&
1052         type != CTRDATA_TYPE_INDIRECT_CALL &&
1053         type != CTRDATA_TYPE_DIRECT_CALL &&
1054         type != CTRDATA_TYPE_RETURN &&
1055         type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1056         return;
1057     }
1058 
1059     if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
1060         /* Case 2 for traps. */
1061         if (!(src_ctrl & src_mask)) {
1062             src = 0;
1063         } else if (!(tgt_ctrl & tgt_mask)) {
1064             /* Check if target priv-mode has allowed external trap recording. */
1065             if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
1066                 return;
1067             }
1068 
1069             ext_trap = true;
1070             dst = 0;
1071         }
1072     } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
1073         /*
1074          * Case 3 for trap returns.  Trap returns from inhibited mode are not
1075          * recorded.
1076          */
1077         if (!(src_ctrl & src_mask)) {
1078             return;
1079         }
1080 
1081         /* Case 2 for trap returns. */
1082         if (!(tgt_ctrl & tgt_mask)) {
1083             dst = 0;
1084         }
1085     }
1086 
1087     /* Ignore filters in case of RASEMU mode or External trap. */
1088     if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
1089         /*
1090          * Check if the specific type is inhibited. The not-taken branch filter
1091          * is an enable bit and needs to be checked separately.
1092          */
1093         bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
1094         if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
1095             (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
1096             return;
1097         }
1098     }
1099 
1100     head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
1101 
1102     depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
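    /*
     * In RASEMU mode a function return pops the most recent entry instead of
     * recording a new one: step WRPTR back and clear that entry's valid bit.
     */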
1103     if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
1104         head = (head - 1) & (depth - 1);
1105 
1106         env->ctr_src[head] &= ~CTRSOURCE_VALID;
1107         env->sctrstatus =
1108             set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1109         return;
1110     }
1111 
1112     /* In case of Co-routine SWAP we overwrite latest entry. */
1113     if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1114         head = (head - 1) & (depth - 1);
1115     }
1116 
1117     env->ctr_src[head] = src | CTRSOURCE_VALID;
1118     env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
1119     env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
1120 
1121     head = (head + 1) & (depth - 1);
1122 
1123     env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1124 }
1125 
1126 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
1127 {
1128     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
1129 
1130     if (newpriv != env->priv || env->virt_enabled != virt_en) {
1131         if (icount_enabled()) {
1132             riscv_itrigger_update_priv(env);
1133         }
1134 
1135         riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
1136     }
1137 
1138     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
1139     env->priv = newpriv;
1140     env->xl = cpu_recompute_xl(env);
1141 
1142     /*
1143      * Clear the load reservation - otherwise a reservation placed in one
1144      * context/process can be used by another, resulting in an SC succeeding
1145      * incorrectly. Version 2.2 of the ISA specification explicitly requires
1146      * this behaviour, while later revisions say that the kernel "should" use
1147      * an SC instruction to force the yielding of a load reservation on a
1148      * preemptive context switch. As a result, do both.
1149      */
1150     env->load_res = -1;
1151 
1152     if (riscv_has_ext(env, RVH)) {
1153         /* Flush the TLB on all virt mode changes. */
1154         if (env->virt_enabled != virt_en) {
1155             tlb_flush(env_cpu(env));
1156         }
1157 
1158         env->virt_enabled = virt_en;
1159         if (virt_en) {
1160             /*
1161              * The guest external interrupts from an interrupt controller are
1162              * delivered only when the Guest/VM is running (i.e. V=1). This
1163              * means any guest external interrupt which is triggered while the
1164              * Guest/VM is not running (i.e. V=0) will be missed in QEMU,
1165              * resulting in the guest responding sluggishly to serial console
1166              * input and other I/O events.
1167              *
1168              * To solve this, we check and inject interrupt after setting V=1.
1169              */
1170             riscv_cpu_update_mip(env, 0, 0);
1171         }
1172     }
1173 }
1174 
1175 /*
1176  * get_physical_address_pmp - check PMP permission for this physical address
1177  *
1178  * Match the PMP region and check permission for this physical address and its
1179  * TLB page. Returns 0 if the permission checking was successful.
1180  *
1181  * @env: CPURISCVState
1182  * @prot: The returned protection attributes
1183  * @addr: The physical address for which permission is checked
1184  * @access_type: The type of MMU access
1185  * @mode: Indicates current privilege level.
1186  */
1187 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
1188                                     int size, MMUAccessType access_type,
1189                                     int mode)
1190 {
1191     pmp_priv_t pmp_priv;
1192     bool pmp_has_privs;
1193 
1194     if (!riscv_cpu_cfg(env)->pmp) {
1195         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1196         return TRANSLATE_SUCCESS;
1197     }
1198 
1199     pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
1200                                        &pmp_priv, mode);
1201     if (!pmp_has_privs) {
1202         *prot = 0;
1203         return TRANSLATE_PMP_FAIL;
1204     }
1205 
1206     *prot = pmp_priv_to_page_prot(pmp_priv);
1207 
1208     return TRANSLATE_SUCCESS;
1209 }
1210 
1211 /* Returns 'true' if a svukte address check is needed */
1212 static bool do_svukte_check(CPURISCVState *env, bool first_stage,
1213                              int mode, bool virt)
1214 {
1215     /* Svukte extension depends on Sv39. */
1216     if (!env_archcpu(env)->cfg.ext_svukte ||
1217         !first_stage ||
1218         VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
1219         return false;
1220     }
1221 
1222     /*
1223      * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
1224      * executing HLV/HLVX/HSV in U-mode.
1225      * For other cases, check senvcfg.UKTE.
1226      */
1227     if (env->priv == PRV_U && !env->virt_enabled && virt) {
1228         if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
1229             return false;
1230         }
1231     } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
1232         return false;
1233     }
1234 
1235     /*
1236      * Svukte extension is qualified only in U or VU-mode.
1237      *
1238      * Effective mode can be switched to U or VU-mode by:
1239      *   - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
1240      *   - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
1241      *   - U-mode.
1242      *   - VU-mode.
1243      *   - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
1244      */
1245     if (mode != PRV_U) {
1246         return false;
1247     }
1248 
1249     return true;
1250 }
1251 
1252 static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
1253 {
1254     /* svukte extension excludes RV32 */
1255     uint32_t sxlen = 32 * riscv_cpu_sxl(env);
1256     uint64_t high_bit = addr & (1UL << (sxlen - 1));
1257     return !high_bit;
1258 }
1259 
1260 /*
1261  * get_physical_address - get the physical address for this virtual address
1262  *
1263  * Do a page table walk to obtain the physical address corresponding to a
1264  * virtual address. Returns 0 if the translation was successful
1265  *
1266  * Adapted from Spike's mmu_t::translate and mmu_t::walk
1267  *
1268  * @env: CPURISCVState
1269  * @physical: This will be set to the calculated physical address
1270  * @prot: The returned protection attributes
1271  * @addr: The virtual address or guest physical address to be translated
1272  * @fault_pte_addr: If not NULL, this will be set to the fault pte address
1273  *                  when an error occurs on pte address translation.
1274  *                  This will already be shifted to match htval.
1275  * @access_type: The type of MMU access
1276  * @mmu_idx: Indicates current privilege level
1277  * @first_stage: Are we in first stage translation?
1278  *               Second stage is used for hypervisor guest translation
1279  * @two_stage: Are we going to perform two stage translation
1280  * @is_debug: Is this access from a debugger or the monitor?
1281  */
1282 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1283                                 int *ret_prot, vaddr addr,
1284                                 target_ulong *fault_pte_addr,
1285                                 int access_type, int mmu_idx,
1286                                 bool first_stage, bool two_stage,
1287                                 bool is_debug, bool is_probe)
1288 {
1289     /*
1290      * NOTE: the env->pc value visible here will not be
1291      * correct, but the value visible to the exception handler
1292      * (riscv_cpu_do_interrupt) is correct
1293      */
1294     MemTxResult res;
1295     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1296     int mode = mmuidx_priv(mmu_idx);
1297     bool virt = mmuidx_2stage(mmu_idx);
1298     bool use_background = false;
1299     hwaddr ppn;
1300     int napot_bits = 0;
1301     target_ulong napot_mask;
1302     bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1303     bool sstack_page = false;
1304 
1305     if (do_svukte_check(env, first_stage, mode, virt) &&
1306         !check_svukte_addr(env, addr)) {
1307         return TRANSLATE_FAIL;
1308     }
1309 
1310     /*
1311      * Check if we should use the background registers for the two
1312      * stage translation. We don't need to check if we actually need
1313      * two stage translation as that happened before this function
1314      * was called. Background registers will be used if the guest has
1315      * forced a two stage translation to be on (in HS or M mode).
1316      */
1317     if (!env->virt_enabled && two_stage) {
1318         use_background = true;
1319     }
1320 
1321     if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1322         *physical = addr;
1323         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1324         return TRANSLATE_SUCCESS;
1325     }
1326 
1327     *ret_prot = 0;
1328 
1329     hwaddr base;
1330     int levels, ptidxbits, ptesize, vm, widened;
1331 
1332     if (first_stage == true) {
1333         if (use_background) {
1334             if (riscv_cpu_mxl(env) == MXL_RV32) {
1335                 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1336                 vm = get_field(env->vsatp, SATP32_MODE);
1337             } else {
1338                 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1339                 vm = get_field(env->vsatp, SATP64_MODE);
1340             }
1341         } else {
1342             if (riscv_cpu_mxl(env) == MXL_RV32) {
1343                 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1344                 vm = get_field(env->satp, SATP32_MODE);
1345             } else {
1346                 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1347                 vm = get_field(env->satp, SATP64_MODE);
1348             }
1349         }
1350         widened = 0;
1351     } else {
1352         if (riscv_cpu_mxl(env) == MXL_RV32) {
1353             base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1354             vm = get_field(env->hgatp, SATP32_MODE);
1355         } else {
1356             base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1357             vm = get_field(env->hgatp, SATP64_MODE);
1358         }
1359         widened = 2;
1360     }
1361 
1362     switch (vm) {
1363     case VM_1_10_SV32:
1364       levels = 2; ptidxbits = 10; ptesize = 4; break;
1365     case VM_1_10_SV39:
1366       levels = 3; ptidxbits = 9; ptesize = 8; break;
1367     case VM_1_10_SV48:
1368       levels = 4; ptidxbits = 9; ptesize = 8; break;
1369     case VM_1_10_SV57:
1370       levels = 5; ptidxbits = 9; ptesize = 8; break;
1371     case VM_1_10_MBARE:
1372         *physical = addr;
1373         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1374         return TRANSLATE_SUCCESS;
1375     default:
1376       g_assert_not_reached();
1377     }
1378 
1379     CPUState *cs = env_cpu(env);
1380     int va_bits = PGSHIFT + levels * ptidxbits + widened;
1381     int sxlen = 16 << riscv_cpu_sxl(env);
1382     int sxlen_bytes = sxlen / 8;
1383 
1384     if (first_stage == true) {
1385         target_ulong mask, masked_msbs;
1386 
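        /*
         * For first-stage translation, all virtual-address bits above
         * bit (va_bits - 1) must be copies of that bit, i.e. the address
         * must be properly sign-extended for the selected VM mode.
         */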
1387         if (sxlen > (va_bits - 1)) {
1388             mask = (1L << (sxlen - (va_bits - 1))) - 1;
1389         } else {
1390             mask = 0;
1391         }
1392         masked_msbs = (addr >> (va_bits - 1)) & mask;
1393 
1394         if (masked_msbs != 0 && masked_msbs != mask) {
1395             return TRANSLATE_FAIL;
1396         }
1397     } else {
1398         if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1399             return TRANSLATE_FAIL;
1400         }
1401     }
1402 
1403     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1404     bool svade = riscv_cpu_cfg(env)->ext_svade;
1405     bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1406     bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1407 
1408     if (first_stage && two_stage && env->virt_enabled) {
1409         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1410         adue = adue && (env->henvcfg & HENVCFG_ADUE);
1411     }
1412 
1413     int ptshift = (levels - 1) * ptidxbits;
1414     target_ulong pte;
1415     hwaddr pte_addr;
1416     int i;
1417 
1418  restart:
1419     for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1420         target_ulong idx;
1421         if (i == 0) {
1422             idx = (addr >> (PGSHIFT + ptshift)) &
1423                            ((1 << (ptidxbits + widened)) - 1);
1424         } else {
1425             idx = (addr >> (PGSHIFT + ptshift)) &
1426                            ((1 << ptidxbits) - 1);
1427         }
1428 
1429         /* check that physical address of PTE is legal */
1430 
1431         if (two_stage && first_stage) {
1432             int vbase_prot;
1433             hwaddr vbase;
1434 
1435             /* Do the second stage translation on the base PTE address. */
1436             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1437                                                  base, NULL, MMU_DATA_LOAD,
1438                                                  MMUIdx_U, false, true,
1439                                                  is_debug, false);
1440 
1441             if (vbase_ret != TRANSLATE_SUCCESS) {
1442                 if (fault_pte_addr) {
1443                     *fault_pte_addr = (base + idx * ptesize) >> 2;
1444                 }
1445                 return TRANSLATE_G_STAGE_FAIL;
1446             }
1447 
1448             pte_addr = vbase + idx * ptesize;
1449         } else {
1450             pte_addr = base + idx * ptesize;
1451         }
1452 
1453         int pmp_prot;
1454         int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1455                                                sxlen_bytes,
1456                                                MMU_DATA_LOAD, PRV_S);
1457         if (pmp_ret != TRANSLATE_SUCCESS) {
1458             return TRANSLATE_PMP_FAIL;
1459         }
1460 
1461         if (riscv_cpu_mxl(env) == MXL_RV32) {
1462             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1463         } else {
1464             pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1465         }
1466 
1467         if (res != MEMTX_OK) {
1468             return TRANSLATE_FAIL;
1469         }
1470 
1471         if (riscv_cpu_sxl(env) == MXL_RV32) {
1472             ppn = pte >> PTE_PPN_SHIFT;
1473         } else {
1474             if (pte & PTE_RESERVED) {
1475                 qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
1476                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1477                               __func__, pte_addr, pte);
1478                 return TRANSLATE_FAIL;
1479             }
1480 
1481             if (!pbmte && (pte & PTE_PBMT)) {
1482                 /* Reserved without Svpbmt. */
1483                 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1484                               "and Svpbmt extension is disabled: "
1485                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1486                               __func__, pte_addr, pte);
1487                 return TRANSLATE_FAIL;
1488             }
1489 
1490             if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1491                 /* Reserved without Svnapot extension */
1492                 qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
1493                               "and Svnapot extension is disabled: "
1494                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1495                               __func__, pte_addr, pte);
1496                 return TRANSLATE_FAIL;
1497             }
1498 
1499             ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1500         }
1501 
1502         if (!(pte & PTE_V)) {
1503             /* Invalid PTE */
1504             return TRANSLATE_FAIL;
1505         }
1506 
1507         if (pte & (PTE_R | PTE_W | PTE_X)) {
1508             goto leaf;
1509         }
1510 
1511         if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1512             /* D, A, and U bits are reserved in non-leaf/inner PTEs */
1513             qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
1514                           "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1515                           __func__, pte_addr, pte);
1516             return TRANSLATE_FAIL;
1517         }
1518         /* Inner PTE, continue walking */
1519         base = ppn << PGSHIFT;
1520     }
1521 
1522     /* No leaf pte at any translation level. */
1523     return TRANSLATE_FAIL;
1524 
1525  leaf:
1526     if (ppn & ((1ULL << ptshift) - 1)) {
1527         /* Misaligned PPN */
1528         qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
1529                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1530                       __func__, pte_addr, pte);
1531         return TRANSLATE_FAIL;
1532     }
1533     if (!pbmte && (pte & PTE_PBMT)) {
1534         /* Reserved without Svpbmt. */
1535         qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1536                       "and Svpbmt extension is disabled: "
1537                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1538                       __func__, pte_addr, pte);
1539         return TRANSLATE_FAIL;
1540     }
1541 
1542     target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1543     /* Check for reserved combinations of RWX flags. */
1544     switch (rwx) {
1545     case PTE_W | PTE_X:
1546         return TRANSLATE_FAIL;
1547     case PTE_W:
1548         /* If bcfi is enabled, PTE_W is not reserved and marks a shadow stack page. */
1549         if (cpu_get_bcfien(env) && first_stage) {
1550             sstack_page = true;
1551             /*
1552              * If this is a shadow stack index, read and write are allowed;
1553              * otherwise, if this is not a probe, only read is allowed.
1554              */
1555             rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
1556             break;
1557         }
1558         return TRANSLATE_FAIL;
1559     case PTE_R:
1560         /*
1561          * No matter what the `access_type` is, shadow stack accesses to
1562          * read-only memory are always store page faults. During unwind,
1563          * loads will be promoted to store faults.
1564          */
1565         if (is_sstack_idx) {
1566             return TRANSLATE_FAIL;
1567         }
1568         break;
1569     }
1570 
1571     int prot = 0;
1572     if (rwx & PTE_R) {
1573         prot |= PAGE_READ;
1574     }
1575     if (rwx & PTE_W) {
1576         prot |= PAGE_WRITE;
1577     }
1578     if (rwx & PTE_X) {
1579         bool mxr = false;
1580 
1581         /*
1582          * Use mstatus for first stage or for the second stage without
1583          * virt_enabled (MPRV+MPV)
1584          */
1585         if (first_stage || !env->virt_enabled) {
1586             mxr = get_field(env->mstatus, MSTATUS_MXR);
1587         }
1588 
1589         /* MPRV+MPV case, check VSSTATUS */
1590         if (first_stage && two_stage && !env->virt_enabled) {
1591             mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1592         }
1593 
1594         /*
1595          * Setting MXR at HS-level overrides both VS-stage and G-stage
1596          * execute-only permissions
1597          */
1598         if (env->virt_enabled) {
1599             mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1600         }
1601 
1602         if (mxr) {
1603             prot |= PAGE_READ;
1604         }
1605         prot |= PAGE_EXEC;
1606     }
1607 
1608     if (pte & PTE_U) {
1609         if (mode != PRV_U) {
1610             if (!mmuidx_sum(mmu_idx)) {
1611                 return TRANSLATE_FAIL;
1612             }
1613             /* SUM allows only read+write, not execute. */
1614             prot &= PAGE_READ | PAGE_WRITE;
1615         }
1616     } else if (mode != PRV_S) {
1617         /* Supervisor PTE flags when not S mode */
1618         return TRANSLATE_FAIL;
1619     }
1620 
1621     if (!((prot >> access_type) & 1)) {
1622         /*
1623          * Access check failed. Access check failures for shadow stack
1624          * pages are reported as access faults.
1625          */
1626         return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1627     }
1628 
1629     target_ulong updated_pte = pte;
1630 
1631     /*
1632      * If ADUE is enabled, set accessed and dirty bits.
1633      * Otherwise raise an exception if necessary.
1634      */
1635     if (adue) {
1636         updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1637     } else if (!(pte & PTE_A) ||
1638                (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1639         return TRANSLATE_FAIL;
1640     }
1641 
1642     /* Page table updates need to be atomic with MTTCG enabled */
1643     if (updated_pte != pte && !is_debug) {
1644         if (!adue) {
1645             return TRANSLATE_FAIL;
1646         }
1647 
1648         /*
1649          * - if accessed or dirty bits need updating, and the PTE is
1650          *   in RAM, then we do so atomically with a compare and swap.
1651          * - if the PTE is in IO space or ROM, then it can't be updated
1652          *   and we return TRANSLATE_FAIL.
1653          * - if the PTE changed by the time we went to update it, then
1654          *   it is no longer valid and we must re-walk the page table.
1655          */
1656         MemoryRegion *mr;
1657         hwaddr l = sxlen_bytes, addr1;
1658         mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1659                                      false, MEMTXATTRS_UNSPECIFIED);
1660         if (memory_region_is_ram(mr)) {
1661             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1662             target_ulong old_pte;
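                 /* Sv32 PTEs are 32 bits wide, so compare-and-swap only 32 bits. */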
1663             if (riscv_cpu_sxl(env) == MXL_RV32) {
1664                 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
1665             } else {
1666                 old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
1667             }
1668             if (old_pte != pte) {
1669                 goto restart;
1670             }
1671             pte = updated_pte;
1672         } else {
1673             /*
1674              * Misconfigured PTE in ROM (AD bits are not preset) or
1675              * PTE is in IO space and can't be updated atomically.
1676              */
1677             return TRANSLATE_FAIL;
1678         }
1679     }
1680 
1681     /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1682     target_ulong vpn = addr >> PGSHIFT;
1683 
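         /*
          * Svnapot: the N bit marks a NAPOT mapping. Only 64 KiB regions
          * (napot_bits == 4) at the last level are defined by the extension.
          */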
1684     if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1685         napot_bits = ctzl(ppn) + 1;
1686         if ((i != (levels - 1)) || (napot_bits != 4)) {
1687             return TRANSLATE_FAIL;
1688         }
1689     }
1690 
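         /*
          * Translated bits come from the PTE's PPN; the NAPOT and superpage
          * low bits are taken from the virtual address instead.
          */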
1691     napot_mask = (1 << napot_bits) - 1;
1692     *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1693                   (vpn & (((target_ulong)1 << ptshift) - 1))
1694                  ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1695 
1696     /*
1697      * Remove write permission unless this is a store, or the page is
1698      * already dirty, so that we TLB miss on later writes to update
1699      * the dirty bit.
1700      */
1701     if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1702         prot &= ~PAGE_WRITE;
1703     }
1704     *ret_prot = prot;
1705 
1706     return TRANSLATE_SUCCESS;
1707 }
1708 
1709 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1710                                 MMUAccessType access_type, bool pmp_violation,
1711                                 bool first_stage, bool two_stage,
1712                                 bool two_stage_indirect)
1713 {
1714     CPUState *cs = env_cpu(env);
1715 
1716     switch (access_type) {
1717     case MMU_INST_FETCH:
1718         if (pmp_violation) {
1719             cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1720         } else if (env->virt_enabled && !first_stage) {
1721             cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1722         } else {
1723             cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1724         }
1725         break;
1726     case MMU_DATA_LOAD:
1727         if (pmp_violation) {
1728             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1729         } else if (two_stage && !first_stage) {
1730             cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1731         } else {
1732             cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1733         }
1734         break;
1735     case MMU_DATA_STORE:
1736         if (pmp_violation) {
1737             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1738         } else if (two_stage && !first_stage) {
1739             cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1740         } else {
1741             cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1742         }
1743         break;
1744     default:
1745         g_assert_not_reached();
1746     }
1747     env->badaddr = address;
1748     env->two_stage_lookup = two_stage;
1749     env->two_stage_indirect_lookup = two_stage_indirect;
1750 }
1751 
1752 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1753 {
1754     RISCVCPU *cpu = RISCV_CPU(cs);
1755     CPURISCVState *env = &cpu->env;
1756     hwaddr phys_addr;
1757     int prot;
1758     int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1759 
1760     if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1761                              true, env->virt_enabled, true, false)) {
1762         return -1;
1763     }
1764 
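         /*
          * With virtualization enabled the first walk returns a guest
          * physical address, which still needs G-stage translation.
          */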
1765     if (env->virt_enabled) {
1766         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1767                                  0, MMUIdx_U, false, true, true, false)) {
1768             return -1;
1769         }
1770     }
1771 
1772     return phys_addr & TARGET_PAGE_MASK;
1773 }
1774 
1775 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1776                                      vaddr addr, unsigned size,
1777                                      MMUAccessType access_type,
1778                                      int mmu_idx, MemTxAttrs attrs,
1779                                      MemTxResult response, uintptr_t retaddr)
1780 {
1781     RISCVCPU *cpu = RISCV_CPU(cs);
1782     CPURISCVState *env = &cpu->env;
1783 
1784     if (access_type == MMU_DATA_STORE) {
1785         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1786     } else if (access_type == MMU_DATA_LOAD) {
1787         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1788     } else {
1789         cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1790     }
1791 
1792     env->badaddr = addr;
1793     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1794     env->two_stage_indirect_lookup = false;
1795     cpu_loop_exit_restore(cs, retaddr);
1796 }
1797 
1798 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1799                                    MMUAccessType access_type, int mmu_idx,
1800                                    uintptr_t retaddr)
1801 {
1802     RISCVCPU *cpu = RISCV_CPU(cs);
1803     CPURISCVState *env = &cpu->env;
1804     switch (access_type) {
1805     case MMU_INST_FETCH:
1806         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1807         break;
1808     case MMU_DATA_LOAD:
1809         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1810         /* shadow stack misaligned accesses are access faults */
1811         if (mmu_idx & MMU_IDX_SS_WRITE) {
1812             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1813         }
1814         break;
1815     case MMU_DATA_STORE:
1816         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1817         /* shadow stack misaligned accesses are access faults */
1818         if (mmu_idx & MMU_IDX_SS_WRITE) {
1819             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1820         }
1821         break;
1822     default:
1823         g_assert_not_reached();
1824     }
1825     env->badaddr = addr;
1826     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1827     env->two_stage_indirect_lookup = false;
1828     cpu_loop_exit_restore(cs, retaddr);
1829 }
1830 
1831 
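     /* Count a TLB fill against the corresponding ITLB/DTLB miss PMU event. */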
1832 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1833 {
1834     enum riscv_pmu_event_idx pmu_event_type;
1835 
1836     switch (access_type) {
1837     case MMU_INST_FETCH:
1838         pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1839         break;
1840     case MMU_DATA_LOAD:
1841         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1842         break;
1843     case MMU_DATA_STORE:
1844         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1845         break;
1846     default:
1847         return;
1848     }
1849 
1850     riscv_pmu_incr_ctr(cpu, pmu_event_type);
1851 }
1852 
1853 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1854                         MMUAccessType access_type, int mmu_idx,
1855                         bool probe, uintptr_t retaddr)
1856 {
1857     RISCVCPU *cpu = RISCV_CPU(cs);
1858     CPURISCVState *env = &cpu->env;
1859     vaddr im_address;
1860     hwaddr pa = 0;
1861     int prot, prot2, prot_pmp;
1862     bool pmp_violation = false;
1863     bool first_stage_error = true;
1864     bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1865     bool two_stage_indirect_error = false;
1866     int ret = TRANSLATE_FAIL;
1867     int mode = mmuidx_priv(mmu_idx);
1868     /* default TLB page size */
1869     hwaddr tlb_size = TARGET_PAGE_SIZE;
1870 
1871     env->guest_phys_fault_addr = 0;
1872 
1873     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1874                   __func__, address, access_type, mmu_idx);
1875 
1876     pmu_tlb_fill_incr_ctr(cpu, access_type);
1877     if (two_stage_lookup) {
1878         /* Two stage lookup */
1879         ret = get_physical_address(env, &pa, &prot, address,
1880                                    &env->guest_phys_fault_addr, access_type,
1881                                    mmu_idx, true, true, false, probe);
1882 
1883         /*
1884          * A G-stage exception may be triggered during the two-stage lookup.
1885          * In that case env->guest_phys_fault_addr has already been set in
1886          * get_physical_address().
1887          */
1888         if (ret == TRANSLATE_G_STAGE_FAIL) {
1889             first_stage_error = false;
1890             two_stage_indirect_error = true;
1891         }
1892 
1893         qemu_log_mask(CPU_LOG_MMU,
1894                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1895                       HWADDR_FMT_plx " prot %d\n",
1896                       __func__, address, ret, pa, prot);
1897 
1898         if (ret == TRANSLATE_SUCCESS) {
1899             /* Second stage lookup */
1900             im_address = pa;
1901 
1902             ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1903                                        access_type, MMUIdx_U, false, true,
1904                                        false, probe);
1905 
1906             qemu_log_mask(CPU_LOG_MMU,
1907                           "%s 2nd-stage address=%" VADDR_PRIx
1908                           " ret %d physical "
1909                           HWADDR_FMT_plx " prot %d\n",
1910                           __func__, im_address, ret, pa, prot2);
1911 
1912             prot &= prot2;
1913 
1914             if (ret == TRANSLATE_SUCCESS) {
1915                 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1916                                                size, access_type, mode);
1917                 tlb_size = pmp_get_tlb_size(env, pa);
1918 
1919                 qemu_log_mask(CPU_LOG_MMU,
1920                               "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1921                               " %d tlb_size %" HWADDR_PRIu "\n",
1922                               __func__, pa, ret, prot_pmp, tlb_size);
1923 
1924                 prot &= prot_pmp;
1925             } else {
1926                 /*
1927                  * Guest physical address translation failed; this is an
1928                  * HS-level exception.
1929                  */
1930                 first_stage_error = false;
1931                 if (ret != TRANSLATE_PMP_FAIL) {
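                         /*
                          * htval/mtval2 hold the guest physical address
                          * shifted right by 2.
                          */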
1932                     env->guest_phys_fault_addr = (im_address |
1933                                                   (address &
1934                                                    (TARGET_PAGE_SIZE - 1))) >> 2;
1935                 }
1936             }
1937         }
1938     } else {
1939         /* Single stage lookup */
1940         ret = get_physical_address(env, &pa, &prot, address, NULL,
1941                                    access_type, mmu_idx, true, false, false,
1942                                    probe);
1943 
1944         qemu_log_mask(CPU_LOG_MMU,
1945                       "%s address=%" VADDR_PRIx " ret %d physical "
1946                       HWADDR_FMT_plx " prot %d\n",
1947                       __func__, address, ret, pa, prot);
1948 
1949         if (ret == TRANSLATE_SUCCESS) {
1950             ret = get_physical_address_pmp(env, &prot_pmp, pa,
1951                                            size, access_type, mode);
1952             tlb_size = pmp_get_tlb_size(env, pa);
1953 
1954             qemu_log_mask(CPU_LOG_MMU,
1955                           "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1956                           " %d tlb_size %" HWADDR_PRIu "\n",
1957                           __func__, pa, ret, prot_pmp, tlb_size);
1958 
1959             prot &= prot_pmp;
1960         }
1961     }
1962 
1963     if (ret == TRANSLATE_PMP_FAIL) {
1964         pmp_violation = true;
1965     }
1966 
1967     if (ret == TRANSLATE_SUCCESS) {
1968         tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1969                      prot, mmu_idx, tlb_size);
1970         return true;
1971     } else if (probe) {
1972         return false;
1973     } else {
1974         int wp_access = 0;
1975 
1976         if (access_type == MMU_DATA_LOAD) {
1977             wp_access |= BP_MEM_READ;
1978         } else if (access_type == MMU_DATA_STORE) {
1979             wp_access |= BP_MEM_WRITE;
1980         }
1981 
1982         /*
1983          * If a watchpoint isn't found for 'address' this will
1984          * be a no-op and we'll resume the mmu_exception path.
1985          * Otherwise we'll throw a debug exception and execution
1986          * will continue elsewhere.
1987          */
1988         cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
1989                              wp_access, retaddr);
1990 
1991         raise_mmu_exception(env, address, access_type, pmp_violation,
1992                             first_stage_error, two_stage_lookup,
1993                             two_stage_indirect_error);
1994         cpu_loop_exit_restore(cs, retaddr);
1995     }
1996 
1997     return true;
1998 }
1999 
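     /*
      * Build the transformed instruction value reported in htinst/mtinst for
      * trapped loads and stores, as described in the RISC-V hypervisor
      * specification.
      */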
2000 static target_ulong riscv_transformed_insn(CPURISCVState *env,
2001                                            target_ulong insn,
2002                                            target_ulong taddr)
2003 {
2004     target_ulong xinsn = 0;
2005     target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
2006 
2007     /*
2008      * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
2009      * be uncompressed. Quadrant 1 of the RVC instruction space need not be
2010      * transformed because those instructions won't generate
2011      * any load/store trap.
2012      */
2013 
2014     if ((insn & 0x3) != 0x3) {
2015         /* Transform 16bit instruction into 32bit instruction */
2016         switch (GET_C_OP(insn)) {
2017         case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
2018             switch (GET_C_FUNC(insn)) {
2019             case OPC_RISC_C_FUNC_FLD_LQ:
2020                 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
2021                     xinsn = OPC_RISC_FLD;
2022                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2023                     access_rs1 = GET_C_RS1S(insn);
2024                     access_imm = GET_C_LD_IMM(insn);
2025                     access_size = 8;
2026                 }
2027                 break;
2028             case OPC_RISC_C_FUNC_LW: /* C.LW */
2029                 xinsn = OPC_RISC_LW;
2030                 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2031                 access_rs1 = GET_C_RS1S(insn);
2032                 access_imm = GET_C_LW_IMM(insn);
2033                 access_size = 4;
2034                 break;
2035             case OPC_RISC_C_FUNC_FLW_LD:
2036                 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
2037                     xinsn = OPC_RISC_FLW;
2038                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2039                     access_rs1 = GET_C_RS1S(insn);
2040                     access_imm = GET_C_LW_IMM(insn);
2041                     access_size = 4;
2042                 } else { /* C.LD (RV64/RV128) */
2043                     xinsn = OPC_RISC_LD;
2044                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2045                     access_rs1 = GET_C_RS1S(insn);
2046                     access_imm = GET_C_LD_IMM(insn);
2047                     access_size = 8;
2048                 }
2049                 break;
2050             case OPC_RISC_C_FUNC_FSD_SQ:
2051                 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
2052                     xinsn = OPC_RISC_FSD;
2053                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2054                     access_rs1 = GET_C_RS1S(insn);
2055                     access_imm = GET_C_SD_IMM(insn);
2056                     access_size = 8;
2057                 }
2058                 break;
2059             case OPC_RISC_C_FUNC_SW: /* C.SW */
2060                 xinsn = OPC_RISC_SW;
2061                 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2062                 access_rs1 = GET_C_RS1S(insn);
2063                 access_imm = GET_C_SW_IMM(insn);
2064                 access_size = 4;
2065                 break;
2066             case OPC_RISC_C_FUNC_FSW_SD:
2067                 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
2068                     xinsn = OPC_RISC_FSW;
2069                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2070                     access_rs1 = GET_C_RS1S(insn);
2071                     access_imm = GET_C_SW_IMM(insn);
2072                     access_size = 4;
2073                 } else { /* C.SD (RV64/RV128) */
2074                     xinsn = OPC_RISC_SD;
2075                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2076                     access_rs1 = GET_C_RS1S(insn);
2077                     access_imm = GET_C_SD_IMM(insn);
2078                     access_size = 8;
2079                 }
2080                 break;
2081             default:
2082                 break;
2083             }
2084             break;
2085         case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
2086             switch (GET_C_FUNC(insn)) {
2087             case OPC_RISC_C_FUNC_FLDSP_LQSP:
2088                 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
2089                     xinsn = OPC_RISC_FLD;
2090                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2091                     access_rs1 = 2;
2092                     access_imm = GET_C_LDSP_IMM(insn);
2093                     access_size = 8;
2094                 }
2095                 break;
2096             case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
2097                 xinsn = OPC_RISC_LW;
2098                 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2099                 access_rs1 = 2;
2100                 access_imm = GET_C_LWSP_IMM(insn);
2101                 access_size = 4;
2102                 break;
2103             case OPC_RISC_C_FUNC_FLWSP_LDSP:
2104                 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
2105                     xinsn = OPC_RISC_FLW;
2106                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2107                     access_rs1 = 2;
2108                     access_imm = GET_C_LWSP_IMM(insn);
2109                     access_size = 4;
2110                 } else { /* C.LDSP (RV64/RV128) */
2111                     xinsn = OPC_RISC_LD;
2112                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2113                     access_rs1 = 2;
2114                     access_imm = GET_C_LDSP_IMM(insn);
2115                     access_size = 8;
2116                 }
2117                 break;
2118             case OPC_RISC_C_FUNC_FSDSP_SQSP:
2119                 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
2120                     xinsn = OPC_RISC_FSD;
2121                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2122                     access_rs1 = 2;
2123                     access_imm = GET_C_SDSP_IMM(insn);
2124                     access_size = 8;
2125                 }
2126                 break;
2127             case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
2128                 xinsn = OPC_RISC_SW;
2129                 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2130                 access_rs1 = 2;
2131                 access_imm = GET_C_SWSP_IMM(insn);
2132                 access_size = 4;
2133                 break;
2134             case OPC_RISC_C_FUNC_FSWSP_SDSP:
2135                 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
2136                     xinsn = OPC_RISC_FSW;
2137                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2138                     access_rs1 = 2;
2139                     access_imm = GET_C_SWSP_IMM(insn);
2140                     access_size = 4;
2141                 } else { /* C.SDSP (RV64/RV128) */
2142                     xinsn = OPC_RISC_SD;
2143                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2144                     access_rs1 = 2;
2145                     access_imm = GET_C_SDSP_IMM(insn);
2146                     access_size = 8;
2147                 }
2148                 break;
2149             default:
2150                 break;
2151             }
2152             break;
2153         default:
2154             break;
2155         }
2156 
2157         /*
2158          * Clear bit 1 of the transformed instruction to indicate that the
2159          * original instruction was a 16-bit instruction.
2160          */
2161         xinsn &= ~((target_ulong)0x2);
2162     } else {
2163         /* Transform 32bit (or wider) instructions */
2164         switch (MASK_OP_MAJOR(insn)) {
2165         case OPC_RISC_ATOMIC:
2166             xinsn = insn;
2167             access_rs1 = GET_RS1(insn);
2168             access_size = 1 << GET_FUNCT3(insn);
2169             break;
2170         case OPC_RISC_LOAD:
2171         case OPC_RISC_FP_LOAD:
2172             xinsn = SET_I_IMM(insn, 0);
2173             access_rs1 = GET_RS1(insn);
2174             access_imm = GET_IMM(insn);
2175             access_size = 1 << GET_FUNCT3(insn);
2176             break;
2177         case OPC_RISC_STORE:
2178         case OPC_RISC_FP_STORE:
2179             xinsn = SET_S_IMM(insn, 0);
2180             access_rs1 = GET_RS1(insn);
2181             access_imm = GET_STORE_IMM(insn);
2182             access_size = 1 << GET_FUNCT3(insn);
2183             break;
2184         case OPC_RISC_SYSTEM:
2185             if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
2186                 xinsn = insn;
2187                 access_rs1 = GET_RS1(insn);
2188                 access_size = (GET_FUNCT7(insn) >> 1) & 0x3;
2189                 access_size = 1 << access_size;
2190             }
2191             break;
2192         default:
2193             break;
2194         }
2195     }
2196 
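         /*
          * Write the "Addr. Offset" into the rs1 field: the low bits of the
          * difference between the faulting address and the instruction's
          * effective address, which is non-zero only for misaligned accesses.
          */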
2197     if (access_size) {
2198         xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
2199                                (access_size - 1));
2200     }
2201 
2202     return xinsn;
2203 }
2204 
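     /*
      * Promote a load fault to the matching store/AMO fault. Used when the
      * access must always be reported as a store/AMO access
      * (RISCV_UW2_ALWAYS_STORE_AMO, e.g. shadow stack accesses).
      */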
2205 static target_ulong promote_load_fault(target_ulong orig_cause)
2206 {
2207     switch (orig_cause) {
2208     case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2209         return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
2210 
2211     case RISCV_EXCP_LOAD_ACCESS_FAULT:
2212         return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
2213 
2214     case RISCV_EXCP_LOAD_PAGE_FAULT:
2215         return RISCV_EXCP_STORE_PAGE_FAULT;
2216     }
2217 
2218     /* if no promotion, return original cause */
2219     return orig_cause;
2220 }
2221 
2222 static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
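     /*
      * Take a resumable NMI (Smrnmi): save the interrupted state in the
      * mn* CSRs and vector to the RNMI interrupt handler in M-mode.
      */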
2223 {
2224     env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
2225     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
2226     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
2227     env->mncause = cause;
2228     env->mnepc = env->pc;
2229     env->pc = env->rnmi_irqvec;
2230 
2231     if (cpu_get_fcfien(env)) {
2232         env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
2233     }
2234 
2235     /* Trapping to M mode, virt is disabled */
2236     riscv_cpu_set_mode(env, PRV_M, false);
2237 }
2238 
2239 /*
2240  * Handle Traps
2241  *
2242  * Adapted from Spike's processor_t::take_trap.
2243  *
2244  */
2245 void riscv_cpu_do_interrupt(CPUState *cs)
2246 {
2247     RISCVCPU *cpu = RISCV_CPU(cs);
2248     CPURISCVState *env = &cpu->env;
2249     bool virt = env->virt_enabled;
2250     bool write_gva = false;
2251     bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
2252     bool vsmode_exc;
2253     uint64_t s;
2254     int mode;
2255 
2256     /*
2257      * cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
2258      * wide, so we mask off the MSB and separate into trap type and cause.
2259      */
2260     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
2261     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
2262     uint64_t deleg = async ? env->mideleg : env->medeleg;
2263     bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
2264         !(env->mip & (1ULL << cause));
2265     bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
2266         !(env->mip & (1ULL << cause));
2267     bool smode_double_trap = false;
2268     uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
2269     const bool prev_virt = env->virt_enabled;
2270     const target_ulong prev_priv = env->priv;
2271     target_ulong tval = 0;
2272     target_ulong tinst = 0;
2273     target_ulong htval = 0;
2274     target_ulong mtval2 = 0;
2275     target_ulong src;
2276     int sxlen = 0;
2277     int mxlen = 16 << riscv_cpu_mxl(env);
2278     bool nnmi_excep = false;
2279 
2280     if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
2281         riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
2282                      env->virt_enabled);
2283         return;
2284     }
2285 
2286     if (!async) {
2287         /* set tval to badaddr for traps with address information */
2288         switch (cause) {
2289 #ifdef CONFIG_TCG
2290         case RISCV_EXCP_SEMIHOST:
2291             do_common_semihosting(cs);
2292             env->pc += 4;
2293             return;
2294 #endif
2295         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2296         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
2297         case RISCV_EXCP_LOAD_ADDR_MIS:
2298         case RISCV_EXCP_STORE_AMO_ADDR_MIS:
2299         case RISCV_EXCP_LOAD_ACCESS_FAULT:
2300         case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
2301         case RISCV_EXCP_LOAD_PAGE_FAULT:
2302         case RISCV_EXCP_STORE_PAGE_FAULT:
2303             if (always_storeamo) {
2304                 cause = promote_load_fault(cause);
2305             }
2306             write_gva = env->two_stage_lookup;
2307             tval = env->badaddr;
2308             if (env->two_stage_indirect_lookup) {
2309                 /*
2310                  * special pseudoinstruction for G-stage fault taken while
2311                  * doing VS-stage page table walk.
2312                  */
2313                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2314             } else {
2315                 /*
2316                  * The "Addr. Offset" field in the transformed instruction is
2317                  * non-zero only for misaligned accesses.
2318                  */
2319                 tinst = riscv_transformed_insn(env, env->bins, tval);
2320             }
2321             break;
2322         case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
2323         case RISCV_EXCP_INST_ADDR_MIS:
2324         case RISCV_EXCP_INST_ACCESS_FAULT:
2325         case RISCV_EXCP_INST_PAGE_FAULT:
2326             write_gva = env->two_stage_lookup;
2327             tval = env->badaddr;
2328             if (env->two_stage_indirect_lookup) {
2329                 /*
2330                  * special pseudoinstruction for G-stage fault taken while
2331                  * doing VS-stage page table walk.
2332                  */
2333                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2334             }
2335             break;
2336         case RISCV_EXCP_ILLEGAL_INST:
2337         case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
2338             tval = env->bins;
2339             break;
2340         case RISCV_EXCP_BREAKPOINT:
2341             tval = env->badaddr;
2342             if (cs->watchpoint_hit) {
2343                 tval = cs->watchpoint_hit->hitaddr;
2344                 cs->watchpoint_hit = NULL;
2345             }
2346             break;
2347         case RISCV_EXCP_SW_CHECK:
2348             tval = env->sw_check_code;
2349             break;
2350         default:
2351             break;
2352         }
2353         /* ecall is dispatched as one cause so translate based on mode */
2354         if (cause == RISCV_EXCP_U_ECALL) {
2355             assert(env->priv <= 3);
2356 
2357             if (env->priv == PRV_M) {
2358                 cause = RISCV_EXCP_M_ECALL;
2359             } else if (env->priv == PRV_S && env->virt_enabled) {
2360                 cause = RISCV_EXCP_VS_ECALL;
2361             } else if (env->priv == PRV_S && !env->virt_enabled) {
2362                 cause = RISCV_EXCP_S_ECALL;
2363             } else if (env->priv == PRV_U) {
2364                 cause = RISCV_EXCP_U_ECALL;
2365             }
2366         }
2367     }
2368 
2369     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
2370                      riscv_cpu_get_trap_name(cause, async));
2371 
2372     qemu_log_mask(CPU_LOG_INT,
2373                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
2374                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
2375                   __func__, env->mhartid, async, cause, env->pc, tval,
2376                   riscv_cpu_get_trap_name(cause, async));
2377 
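     /*
      * Take the trap in S-mode when running at S-mode or below and the cause
      * is delegated via medeleg/mideleg or injected via mvien/hvien;
      * otherwise take it in M-mode.
      */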
2378     mode = env->priv <= PRV_S && cause < 64 &&
2379         (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
2380 
2381     vsmode_exc = env->virt_enabled && cause < 64 &&
2382         (((hdeleg >> cause) & 1) || vs_injected);
2383 
2384     /*
2385      * Check double trap condition only if already in S-mode and targeting
2386      * S-mode
2387      */
2388     if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
2389         bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
2390         bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
2391         /* In VS or HS */
2392         if (riscv_has_ext(env, RVH)) {
2393             if (vsmode_exc) {
2394                 /* VS -> VS, use henvcfg instead of menvcfg */
2395                 dte = (env->henvcfg & HENVCFG_DTE) != 0;
2396             } else if (env->virt_enabled) {
2397                 /* VS -> HS, use mstatus_hs */
2398                 sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
2399             }
2400         }
2401         smode_double_trap = dte && sdt;
2402         if (smode_double_trap) {
2403             mode = PRV_M;
2404         }
2405     }
2406 
2407     if (mode == PRV_S) {
2408         /* handle the trap in S-mode */
2409         /* save elp status */
2410         if (cpu_get_fcfien(env)) {
2411             env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
2412         }
2413 
2414         if (riscv_has_ext(env, RVH)) {
2415             if (vsmode_exc) {
2416                 /* Trap to VS mode */
2417                 /*
2418                  * See if we need to adjust the cause: yes if it is a VS-mode
2419                  * interrupt, no if the hypervisor delegated an HS-mode interrupt.
2420                  */
2421                 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
2422                               cause == IRQ_VS_EXT)) {
2423                     cause = cause - 1;
2424                 }
2425                 write_gva = false;
2426             } else if (env->virt_enabled) {
2427                 /* Trap into HS mode, from virt */
2428                 riscv_cpu_swap_hypervisor_regs(env);
2429                 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
2430                                          env->priv);
2431                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
2432 
2433                 htval = env->guest_phys_fault_addr;
2434 
2435                 virt = false;
2436             } else {
2437                 /* Trap into HS mode */
2438                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
2439                 htval = env->guest_phys_fault_addr;
2440             }
2441             env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
2442         }
2443 
2444         s = env->mstatus;
2445         s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
2446         s = set_field(s, MSTATUS_SPP, env->priv);
2447         s = set_field(s, MSTATUS_SIE, 0);
2448         if (riscv_env_smode_dbltrp_enabled(env, virt)) {
2449             s = set_field(s, MSTATUS_SDT, 1);
2450         }
2451         env->mstatus = s;
2452         sxlen = 16 << riscv_cpu_sxl(env);
2453         env->scause = cause | ((target_ulong)async << (sxlen - 1));
2454         env->sepc = env->pc;
2455         env->stval = tval;
2456         env->htval = htval;
2457         env->htinst = tinst;
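             /*
              * Vectored mode (stvec[1:0] == 1): asynchronous traps jump to
              * base + 4 * cause.
              */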
2458         env->pc = (env->stvec >> 2 << 2) +
2459                   ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
2460         riscv_cpu_set_mode(env, PRV_S, virt);
2461 
2462         src = env->sepc;
2463     } else {
2464         /*
2465          * If the hart encounters an exception while executing in M-mode
2466          * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
2467          */
2468         nnmi_excep = cpu->cfg.ext_smrnmi &&
2469                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2470                      !async;
2471 
2472         /* handle the trap in M-mode */
2473         /* save elp status */
2474         if (cpu_get_fcfien(env)) {
2475             if (nnmi_excep) {
2476                 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
2477                                           env->elp);
2478             } else {
2479                 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
2480             }
2481         }
2482 
2483         if (riscv_has_ext(env, RVH)) {
2484             if (env->virt_enabled) {
2485                 riscv_cpu_swap_hypervisor_regs(env);
2486             }
2487             env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
2488                                      env->virt_enabled);
2489             if (env->virt_enabled && tval) {
2490                 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
2491             }
2492 
2493             mtval2 = env->guest_phys_fault_addr;
2494 
2495             /* Trapping to M mode, virt is disabled */
2496             virt = false;
2497         }
2498         /*
2499          * If the hart encounters an exception while executing in M-mode,
2500          * with the mnstatus.NMIE bit clear, the program counter is set to
2501          * the RNMI exception trap handler address.
2502          */
2503         nnmi_excep = cpu->cfg.ext_smrnmi &&
2504                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2505                      !async;
2506 
2507         s = env->mstatus;
2508         s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
2509         s = set_field(s, MSTATUS_MPP, env->priv);
2510         s = set_field(s, MSTATUS_MIE, 0);
2511         if (cpu->cfg.ext_smdbltrp) {
2512             if (env->mstatus & MSTATUS_MDT) {
2513                 assert(env->priv == PRV_M);
2514                 if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
2515                     cpu_abort(CPU(cpu), "M-mode double trap\n");
2516                 } else {
2517                     riscv_do_nmi(env, cause, false);
2518                     return;
2519                 }
2520             }
2521 
2522             s = set_field(s, MSTATUS_MDT, 1);
2523         }
2524         env->mstatus = s;
2525         env->mcause = cause | ((target_ulong)async << (mxlen - 1));
2526         if (smode_double_trap) {
2527             env->mtval2 = env->mcause;
2528             env->mcause = RISCV_EXCP_DOUBLE_TRAP;
2529         } else {
2530             env->mtval2 = mtval2;
2531         }
2532         env->mepc = env->pc;
2533         env->mtval = tval;
2534         env->mtinst = tinst;
2535 
2536         /*
2537          * For RNMI exception, program counter is set to the RNMI exception
2538          * trap handler address.
2539          */
2540         if (nnmi_excep) {
2541             env->pc = env->rnmi_excpvec;
2542         } else {
2543             env->pc = (env->mtvec >> 2 << 2) +
2544                       ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
2545         }
2546         riscv_cpu_set_mode(env, PRV_M, virt);
2547         src = env->mepc;
2548     }
2549 
2550     if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
2551         if (async && cause == IRQ_PMU_OVF) {
2552             riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
2553         } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
2554             riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
2555         }
2556 
2557         riscv_ctr_add_entry(env, src, env->pc,
2558                         async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
2559                         prev_priv, prev_virt);
2560     }
2561 
2562     /*
2563      * Interrupt/exception/trap delivery is an asynchronous event and, as per
2564      * the zicfilp spec, the CPU should clear the ELP state. There is no harm
2565      * in clearing it unconditionally.
2566      */
2567     env->elp = false;
2568 
2569     /*
2570      * NOTE: it is not necessary to yield load reservations here. It is only
2571      * necessary for an SC from "another hart" to cause a load reservation
2572      * to be yielded. Refer to the memory consistency model section of the
2573      * RISC-V ISA Specification.
2574      */
2575 
2576     env->two_stage_lookup = false;
2577     env->two_stage_indirect_lookup = false;
2578 }
2579 
2580 #endif /* !CONFIG_USER_ONLY */
2581