xref: /qemu/target/riscv/cpu_helper.c (revision 6ff5da16000f908140723e164d33a0b51a6c4162)
1 /*
2  * RISC-V CPU helpers for qemu.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/cputlb.h"
27 #include "exec/exec-all.h"
28 #include "exec/page-protection.h"
29 #include "instmap.h"
30 #include "tcg/tcg-op.h"
31 #include "accel/tcg/cpu-ops.h"
32 #include "trace.h"
33 #include "semihosting/common-semi.h"
34 #include "system/cpu-timers.h"
35 #include "cpu_bits.h"
36 #include "debug.h"
37 #include "pmp.h"
38 
39 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
40 {
41 #ifdef CONFIG_USER_ONLY
42     return 0;
43 #else
44     bool virt = env->virt_enabled;
45     int mode = env->priv;
46 
47     /* All priv -> mmu_idx mappings are here */
48     if (!ifetch) {
49         uint64_t status = env->mstatus;
50 
51         if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
52             mode = get_field(env->mstatus, MSTATUS_MPP);
53             virt = get_field(env->mstatus, MSTATUS_MPV) &&
54                    (mode != PRV_M);
55             if (virt) {
56                 status = env->vsstatus;
57             }
58         }
59         if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
60             mode = MMUIdx_S_SUM;
61         }
62     }
63 
64     return mode | (virt ? MMU_2STAGE_BIT : 0);
65 #endif
66 }
67 
68 bool cpu_get_fcfien(CPURISCVState *env)
69 {
70     /* no cfi extension, return false */
71     if (!env_archcpu(env)->cfg.ext_zicfilp) {
72         return false;
73     }
74 
75     switch (env->priv) {
76     case PRV_U:
77         if (riscv_has_ext(env, RVS)) {
78             return env->senvcfg & SENVCFG_LPE;
79         }
80         return env->menvcfg & MENVCFG_LPE;
81 #ifndef CONFIG_USER_ONLY
82     case PRV_S:
83         if (env->virt_enabled) {
84             return env->henvcfg & HENVCFG_LPE;
85         }
86         return env->menvcfg & MENVCFG_LPE;
87     case PRV_M:
88         return env->mseccfg & MSECCFG_MLPE;
89 #endif
90     default:
91         g_assert_not_reached();
92     }
93 }
94 
95 bool cpu_get_bcfien(CPURISCVState *env)
96 {
97     /* no cfi extension, return false */
98     if (!env_archcpu(env)->cfg.ext_zicfiss) {
99         return false;
100     }
101 
102     switch (env->priv) {
103     case PRV_U:
104         /*
105          * If S is not implemented then the shadow stack for U can't be turned
106          * on. This is checked in `riscv_cpu_validate_set_extensions`, so there
107          * is no need to check or assert here.
108          */
109         return env->senvcfg & SENVCFG_SSE;
110 #ifndef CONFIG_USER_ONLY
111     case PRV_S:
112         if (env->virt_enabled) {
113             return env->henvcfg & HENVCFG_SSE;
114         }
115         return env->menvcfg & MENVCFG_SSE;
116     case PRV_M: /* M-mode shadow stack is always off */
117         return false;
118 #endif
119     default:
120         g_assert_not_reached();
121     }
122 }
123 
124 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
125 {
126 #ifdef CONFIG_USER_ONLY
127     return false;
128 #else
129     if (virt) {
130         return (env->henvcfg & HENVCFG_DTE) != 0;
131     } else {
132         return (env->menvcfg & MENVCFG_DTE) != 0;
133     }
134 #endif
135 }
136 
137 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
138                           uint64_t *cs_base, uint32_t *pflags)
139 {
140     RISCVCPU *cpu = env_archcpu(env);
141     RISCVExtStatus fs, vs;
142     uint32_t flags = 0;
143     bool pm_signext = riscv_cpu_virt_mem_enabled(env);
144 
145     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
146     *cs_base = 0;
147 
148     if (cpu->cfg.ext_zve32x) {
149         /*
150          * If env->vl equals VLMAX, we can use the generic vector operation
151          * expanders (GVEC) to accelerate the vector operations.
152          * However, as LMUL can be a fractional number, the maximum
153          * vector size that can be operated on might be less than 8 bytes,
154          * which is not supported by GVEC. So we set the vl_eq_vlmax flag
155          * only when maxsz >= 8 bytes.
156          */
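        /*
         * Worked example (illustrative numbers, not taken from the code
         * below): with VLEN = 128 bits, SEW = 8 and LMUL = 1/8, VLMAX =
         * LMUL * VLEN / SEW = 2 elements, so maxsz is 2 bytes and the GVEC
         * fast path cannot be used. With LMUL = 1 the same configuration
         * gives VLMAX = 16 and maxsz = 16 bytes, so vl_eq_vlmax can be set
         * when vstart == 0 and vl == VLMAX.
         */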
157 
158         /* lmul encoded as in DisasContext::lmul */
159         int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
160         uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
161         uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
162         uint32_t maxsz = vlmax << vsew;
163         bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
164                            (maxsz >= 8);
165         flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
166         flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
167         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
168                            FIELD_EX64(env->vtype, VTYPE, VLMUL));
169         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
170         flags = FIELD_DP32(flags, TB_FLAGS, VTA,
171                            FIELD_EX64(env->vtype, VTYPE, VTA));
172         flags = FIELD_DP32(flags, TB_FLAGS, VMA,
173                            FIELD_EX64(env->vtype, VTYPE, VMA));
174         flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
175     } else {
176         flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
177     }
178 
179     if (cpu_get_fcfien(env)) {
180         /*
181          * For Forward CFI, only the expectation of an lpad at
182          * the start of the block is tracked via env->elp. env->elp
183          * is turned on during jalr translation.
184          */
185         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
186         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
187     }
188 
189     if (cpu_get_bcfien(env)) {
190         flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
191     }
192 
193 #ifdef CONFIG_USER_ONLY
194     fs = EXT_STATUS_DIRTY;
195     vs = EXT_STATUS_DIRTY;
196 #else
197     flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
198 
199     flags |= riscv_env_mmu_index(env, 0);
200     fs = get_field(env->mstatus, MSTATUS_FS);
201     vs = get_field(env->mstatus, MSTATUS_VS);
202 
203     if (env->virt_enabled) {
204         flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
205         /*
206          * Merge DISABLED and !DIRTY states using MIN.
207          * We will set both fields when dirtying.
208          */
209         fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
210         vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
211     }
212 
213     /* With Zfinx, floating point is enabled/disabled by Smstateen. */
214     if (!riscv_has_ext(env, RVF)) {
215         fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
216              ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
217     }
218 
219     if (cpu->cfg.debug && !icount_enabled()) {
220         flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
221     }
222 #endif
223 
224     flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
225     flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
226     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
227     flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
228     flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
229     flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
230 
231     *pflags = flags;
232 }
233 
234 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
235 {
236 #ifndef CONFIG_USER_ONLY
237     int priv_mode = cpu_address_mode(env);
238 
239     if (get_field(env->mstatus, MSTATUS_MPRV) &&
240         get_field(env->mstatus, MSTATUS_MXR)) {
241         return PMM_FIELD_DISABLED;
242     }
243 
244     /* Get current PMM field */
245     switch (priv_mode) {
246     case PRV_M:
247         if (riscv_cpu_cfg(env)->ext_smmpm) {
248             return get_field(env->mseccfg, MSECCFG_PMM);
249         }
250         break;
251     case PRV_S:
252         if (riscv_cpu_cfg(env)->ext_smnpm) {
253             if (get_field(env->mstatus, MSTATUS_MPV)) {
254                 return get_field(env->henvcfg, HENVCFG_PMM);
255             } else {
256                 return get_field(env->menvcfg, MENVCFG_PMM);
257             }
258         }
259         break;
260     case PRV_U:
261         if (riscv_has_ext(env, RVS)) {
262             if (riscv_cpu_cfg(env)->ext_ssnpm) {
263                 return get_field(env->senvcfg, SENVCFG_PMM);
264             }
265         } else {
266             if (riscv_cpu_cfg(env)->ext_smnpm) {
267                 return get_field(env->menvcfg, MENVCFG_PMM);
268             }
269         }
270         break;
271     default:
272         g_assert_not_reached();
273     }
274     return PMM_FIELD_DISABLED;
275 #else
276     return PMM_FIELD_DISABLED;
277 #endif
278 }
279 
280 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
281 {
282 #ifndef CONFIG_USER_ONLY
283     int priv_mode = cpu_address_mode(env);
284 
285     if (priv_mode == PRV_U) {
286         return get_field(env->hstatus, HSTATUS_HUPMM);
287     } else {
288         if (get_field(env->hstatus, HSTATUS_SPVP)) {
289             return get_field(env->henvcfg, HENVCFG_PMM);
290         } else {
291             return get_field(env->senvcfg, SENVCFG_PMM);
292         }
293     }
294 #else
295     return PMM_FIELD_DISABLED;
296 #endif
297 }
298 
299 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
300 {
301 #ifndef CONFIG_USER_ONLY
302     int satp_mode = 0;
303     int priv_mode = cpu_address_mode(env);
304 
305     if (riscv_cpu_mxl(env) == MXL_RV32) {
306         satp_mode = get_field(env->satp, SATP32_MODE);
307     } else {
308         satp_mode = get_field(env->satp, SATP64_MODE);
309     }
310 
311     return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
312 #else
313     return false;
314 #endif
315 }
316 
317 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
318 {
319     switch (pmm) {
320     case PMM_FIELD_DISABLED:
321         return 0;
322     case PMM_FIELD_PMLEN7:
323         return 7;
324     case PMM_FIELD_PMLEN16:
325         return 16;
326     default:
327         g_assert_not_reached();
328     }
329 }
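
/*
 * Illustrative sketch only, not part of this file: one way the PMLEN value
 * returned above could be applied to an effective address. The top pmlen
 * bits are ignored; they are replaced by a sign extension of the remaining
 * address bits when virtual memory is in effect (cf. PM_SIGNEXTEND in
 * cpu_get_tb_cpu_state()), and by zeros otherwise. The helper name is
 * hypothetical.
 */
static inline target_ulong pm_mask_address_example(target_ulong addr,
                                                   uint32_t pmlen,
                                                   bool sign_extend)
{
    if (pmlen == 0) {
        return addr;
    }
    if (sign_extend) {
        /* Replicate bit (XLEN - pmlen - 1) into the top pmlen bits. */
        return (target_long)(addr << pmlen) >> pmlen;
    }
    /* Zero the top pmlen bits. */
    return (addr << pmlen) >> pmlen;
}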
330 
331 #ifndef CONFIG_USER_ONLY
332 
333 /*
334  * The HS-mode is allowed to configure priority only for the
335  * following VS-mode local interrupts:
336  *
337  * 0  (Reserved interrupt, reads as zero)
338  * 1  Supervisor software interrupt
339  * 4  (Reserved interrupt, reads as zero)
340  * 5  Supervisor timer interrupt
341  * 8  (Reserved interrupt, reads as zero)
342  * 13 (Reserved interrupt)
343  * 14 "
344  * 15 "
345  * 16 "
346  * 17 "
347  * 18 "
348  * 19 "
349  * 20 "
350  * 21 "
351  * 22 "
352  * 23 "
353  */
354 
355 static const int hviprio_index2irq[] = {
356     0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
357 static const int hviprio_index2rdzero[] = {
358     1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
359 
360 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
361 {
362     if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
363         return -EINVAL;
364     }
365 
366     if (out_irq) {
367         *out_irq = hviprio_index2irq[index];
368     }
369 
370     if (out_rdzero) {
371         *out_rdzero = hviprio_index2rdzero[index];
372     }
373 
374     return 0;
375 }
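
/*
 * Usage example (illustrative): with the tables above, index 1 maps to
 * IRQ 1 (the supervisor software interrupt) with rdzero = 0, while index 0
 * maps to the reserved IRQ 0 with rdzero = 1, i.e. its priority reads as
 * zero.
 *
 *     int irq, rdzero;
 *     if (riscv_cpu_hviprio_index2irq(1, &irq, &rdzero) == 0) {
 *         ... here irq == 1 and rdzero == 0 ...
 *     }
 */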
376 
377 /*
378  * Default priorities of local interrupts are defined in the
379  * RISC-V Advanced Interrupt Architecture specification.
380  *
381  * ----------------------------------------------------------------
382  *  Default  |
383  *  Priority | Major Interrupt Numbers
384  * ----------------------------------------------------------------
385  *  Highest  | 47, 23, 46, 45, 22, 44,
386  *           | 43, 21, 42, 41, 20, 40
387  *           |
388  *           | 11 (0b),  3 (03),  7 (07)
389  *           |  9 (09),  1 (01),  5 (05)
390  *           | 12 (0c)
391  *           | 10 (0a),  2 (02),  6 (06)
392  *           |
393  *           | 39, 19, 38, 37, 18, 36,
394  *  Lowest   | 35, 17, 34, 33, 16, 32
395  * ----------------------------------------------------------------
396  */
397 static const uint8_t default_iprio[64] = {
398     /* Custom interrupts 48 to 63 */
399     [63] = IPRIO_MMAXIPRIO,
400     [62] = IPRIO_MMAXIPRIO,
401     [61] = IPRIO_MMAXIPRIO,
402     [60] = IPRIO_MMAXIPRIO,
403     [59] = IPRIO_MMAXIPRIO,
404     [58] = IPRIO_MMAXIPRIO,
405     [57] = IPRIO_MMAXIPRIO,
406     [56] = IPRIO_MMAXIPRIO,
407     [55] = IPRIO_MMAXIPRIO,
408     [54] = IPRIO_MMAXIPRIO,
409     [53] = IPRIO_MMAXIPRIO,
410     [52] = IPRIO_MMAXIPRIO,
411     [51] = IPRIO_MMAXIPRIO,
412     [50] = IPRIO_MMAXIPRIO,
413     [49] = IPRIO_MMAXIPRIO,
414     [48] = IPRIO_MMAXIPRIO,
415 
416     /* Custom interrupts 24 to 31 */
417     [31] = IPRIO_MMAXIPRIO,
418     [30] = IPRIO_MMAXIPRIO,
419     [29] = IPRIO_MMAXIPRIO,
420     [28] = IPRIO_MMAXIPRIO,
421     [27] = IPRIO_MMAXIPRIO,
422     [26] = IPRIO_MMAXIPRIO,
423     [25] = IPRIO_MMAXIPRIO,
424     [24] = IPRIO_MMAXIPRIO,
425 
426     [47] = IPRIO_DEFAULT_UPPER,
427     [23] = IPRIO_DEFAULT_UPPER + 1,
428     [46] = IPRIO_DEFAULT_UPPER + 2,
429     [45] = IPRIO_DEFAULT_UPPER + 3,
430     [22] = IPRIO_DEFAULT_UPPER + 4,
431     [44] = IPRIO_DEFAULT_UPPER + 5,
432 
433     [43] = IPRIO_DEFAULT_UPPER + 6,
434     [21] = IPRIO_DEFAULT_UPPER + 7,
435     [42] = IPRIO_DEFAULT_UPPER + 8,
436     [41] = IPRIO_DEFAULT_UPPER + 9,
437     [20] = IPRIO_DEFAULT_UPPER + 10,
438     [40] = IPRIO_DEFAULT_UPPER + 11,
439 
440     [11] = IPRIO_DEFAULT_M,
441     [3]  = IPRIO_DEFAULT_M + 1,
442     [7]  = IPRIO_DEFAULT_M + 2,
443 
444     [9]  = IPRIO_DEFAULT_S,
445     [1]  = IPRIO_DEFAULT_S + 1,
446     [5]  = IPRIO_DEFAULT_S + 2,
447 
448     [12] = IPRIO_DEFAULT_SGEXT,
449 
450     [10] = IPRIO_DEFAULT_VS,
451     [2]  = IPRIO_DEFAULT_VS + 1,
452     [6]  = IPRIO_DEFAULT_VS + 2,
453 
454     [39] = IPRIO_DEFAULT_LOWER,
455     [19] = IPRIO_DEFAULT_LOWER + 1,
456     [38] = IPRIO_DEFAULT_LOWER + 2,
457     [37] = IPRIO_DEFAULT_LOWER + 3,
458     [18] = IPRIO_DEFAULT_LOWER + 4,
459     [36] = IPRIO_DEFAULT_LOWER + 5,
460 
461     [35] = IPRIO_DEFAULT_LOWER + 6,
462     [17] = IPRIO_DEFAULT_LOWER + 7,
463     [34] = IPRIO_DEFAULT_LOWER + 8,
464     [33] = IPRIO_DEFAULT_LOWER + 9,
465     [16] = IPRIO_DEFAULT_LOWER + 10,
466     [32] = IPRIO_DEFAULT_LOWER + 11,
467 };
468 
469 uint8_t riscv_cpu_default_priority(int irq)
470 {
471     if (irq < 0 || irq > 63) {
472         return IPRIO_MMAXIPRIO;
473     }
474 
475     return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
476 }
477 
478 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
479                                     int extirq, unsigned int extirq_def_prio,
480                                     uint64_t pending, uint8_t *iprio)
481 {
482     int irq, best_irq = RISCV_EXCP_NONE;
483     unsigned int prio, best_prio = UINT_MAX;
484 
485     if (!pending) {
486         return RISCV_EXCP_NONE;
487     }
488 
489     irq = ctz64(pending);
490     if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
491                                   riscv_cpu_cfg(env)->ext_ssaia)) {
492         return irq;
493     }
494 
495     pending = pending >> irq;
496     while (pending) {
497         prio = iprio[irq];
498         if (!prio) {
499             if (irq == extirq) {
500                 prio = extirq_def_prio;
501             } else {
502                 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
503                        1 : IPRIO_MMAXIPRIO;
504             }
505         }
506         if ((pending & 0x1) && (prio <= best_prio)) {
507             best_irq = irq;
508             best_prio = prio;
509         }
510         irq++;
511         pending = pending >> 1;
512     }
513 
514     return best_irq;
515 }
516 
517 /*
518  * Doesn't report interrupts inserted using mvip from M-mode firmware or
519  * using hvip bits 13:63 from HS-mode. Those are returned in
520  * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
521  */
522 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
523 {
524     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
525     uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
526     uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
527 
528     return (env->mip | vsgein | vstip) & env->mie;
529 }
530 
531 int riscv_cpu_mirq_pending(CPURISCVState *env)
532 {
533     uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
534                     ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
535 
536     return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
537                                     irqs, env->miprio);
538 }
539 
540 int riscv_cpu_sirq_pending(CPURISCVState *env)
541 {
542     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
543                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
544     uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
545 
546     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
547                                     irqs | irqs_f, env->siprio);
548 }
549 
550 int riscv_cpu_vsirq_pending(CPURISCVState *env)
551 {
552     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
553     uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
554     uint64_t vsbits;
555 
556     /* Bring VS-level bits to correct position */
557     vsbits = irqs & VS_MODE_INTERRUPTS;
558     irqs &= ~VS_MODE_INTERRUPTS;
559     irqs |= vsbits >> 1;
560 
561     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
562                                     (irqs | irqs_f_vs), env->hviprio);
563 }
564 
565 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
566 {
567     uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
568     uint64_t vsbits, irq_delegated;
569     int virq;
570 
571     /* Priority: RNMI > Other interrupt. */
572     if (riscv_cpu_cfg(env)->ext_smrnmi) {
573         /* If mnstatus.NMIE == 0, all interrupts are disabled. */
574         if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
575             return RISCV_EXCP_NONE;
576         }
577 
578         if (env->rnmip) {
579             return ctz64(env->rnmip); /* since non-zero */
580         }
581     }
582 
583     /* Determine interrupt enable state of all privilege modes */
584     if (env->virt_enabled) {
585         mie = 1;
586         hsie = 1;
587         vsie = (env->priv < PRV_S) ||
588                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
589     } else {
590         mie = (env->priv < PRV_M) ||
591               (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
592         hsie = (env->priv < PRV_S) ||
593                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
594         vsie = 0;
595     }
596 
597     /* Determine all pending interrupts */
598     pending = riscv_cpu_all_pending(env);
599 
600     /* Check M-mode interrupts */
601     irqs = pending & ~env->mideleg & -mie;
602     if (irqs) {
603         return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
604                                         irqs, env->miprio);
605     }
606 
607     /* Check for virtual S-mode interrupts. */
608     irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
609 
610     /* Check HS-mode interrupts */
611     irqs =  ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
612     if (irqs) {
613         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
614                                         irqs, env->siprio);
615     }
616 
617     /* Check for virtual VS-mode interrupts. */
618     irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
619 
620     /* Check VS-mode interrupts */
621     irq_delegated = pending & env->mideleg & env->hideleg;
622 
623     /* Bring VS-level bits to correct position */
624     vsbits = irq_delegated & VS_MODE_INTERRUPTS;
625     irq_delegated &= ~VS_MODE_INTERRUPTS;
626     irq_delegated |= vsbits >> 1;
627 
628     irqs = (irq_delegated | irqs_f_vs) & -vsie;
629     if (irqs) {
630         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
631                                         irqs, env->hviprio);
632         if (virq <= 0 || (virq > 12 && virq <= 63)) {
633             return virq;
634         } else {
635             return virq + 1;
636         }
637     }
638 
639     /* Indicate no pending interrupt */
640     return RISCV_EXCP_NONE;
641 }
642 
643 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
644 {
645     uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
646 
647     if (interrupt_request & mask) {
648         RISCVCPU *cpu = RISCV_CPU(cs);
649         CPURISCVState *env = &cpu->env;
650         int interruptno = riscv_cpu_local_irq_pending(env);
651         if (interruptno >= 0) {
652             cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
653             riscv_cpu_do_interrupt(cs);
654             return true;
655         }
656     }
657     return false;
658 }
659 
660 /* Return true if floating point support is currently enabled */
661 bool riscv_cpu_fp_enabled(CPURISCVState *env)
662 {
663     if (env->mstatus & MSTATUS_FS) {
664         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
665             return false;
666         }
667         return true;
668     }
669 
670     return false;
671 }
672 
673 /* Return true if vector support is currently enabled */
674 bool riscv_cpu_vector_enabled(CPURISCVState *env)
675 {
676     if (env->mstatus & MSTATUS_VS) {
677         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
678             return false;
679         }
680         return true;
681     }
682 
683     return false;
684 }
685 
686 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
687 {
688     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
689                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
690                             MSTATUS64_UXL | MSTATUS_VS;
691 
692     if (riscv_has_ext(env, RVF)) {
693         mstatus_mask |= MSTATUS_FS;
694     }
695     bool current_virt = env->virt_enabled;
696 
697     /*
698      * If the zicfilp extension is available and henvcfg.LPE = 1,
699      * then apply the SPELP mask on mstatus.
700      */
701     if (env_archcpu(env)->cfg.ext_zicfilp &&
702         get_field(env->henvcfg, HENVCFG_LPE)) {
703         mstatus_mask |= SSTATUS_SPELP;
704     }
705 
706     g_assert(riscv_has_ext(env, RVH));
707 
708     if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
709         mstatus_mask |= MSTATUS_SDT;
710     }
711 
712     if (current_virt) {
713         /* Current V=1 and we are about to change to V=0 */
714         env->vsstatus = env->mstatus & mstatus_mask;
715         env->mstatus &= ~mstatus_mask;
716         env->mstatus |= env->mstatus_hs;
717 
718         env->vstvec = env->stvec;
719         env->stvec = env->stvec_hs;
720 
721         env->vsscratch = env->sscratch;
722         env->sscratch = env->sscratch_hs;
723 
724         env->vsepc = env->sepc;
725         env->sepc = env->sepc_hs;
726 
727         env->vscause = env->scause;
728         env->scause = env->scause_hs;
729 
730         env->vstval = env->stval;
731         env->stval = env->stval_hs;
732 
733         env->vsatp = env->satp;
734         env->satp = env->satp_hs;
735     } else {
736         /* Current V=0 and we are about to change to V=1 */
737         env->mstatus_hs = env->mstatus & mstatus_mask;
738         env->mstatus &= ~mstatus_mask;
739         env->mstatus |= env->vsstatus;
740 
741         env->stvec_hs = env->stvec;
742         env->stvec = env->vstvec;
743 
744         env->sscratch_hs = env->sscratch;
745         env->sscratch = env->vsscratch;
746 
747         env->sepc_hs = env->sepc;
748         env->sepc = env->vsepc;
749 
750         env->scause_hs = env->scause;
751         env->scause = env->vscause;
752 
753         env->stval_hs = env->stval;
754         env->stval = env->vstval;
755 
756         env->satp_hs = env->satp;
757         env->satp = env->vsatp;
758     }
759 }
760 
761 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
762 {
763     if (!riscv_has_ext(env, RVH)) {
764         return 0;
765     }
766 
767     return env->geilen;
768 }
769 
770 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
771 {
772     if (!riscv_has_ext(env, RVH)) {
773         return;
774     }
775 
776     if (geilen > (TARGET_LONG_BITS - 1)) {
777         return;
778     }
779 
780     env->geilen = geilen;
781 }
782 
783 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
784 {
785     CPURISCVState *env = &cpu->env;
786     CPUState *cs = CPU(cpu);
787     bool release_lock = false;
788 
789     if (!bql_locked()) {
790         release_lock = true;
791         bql_lock();
792     }
793 
794     if (level) {
795         env->rnmip |= 1 << irq;
796         cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
797     } else {
798         env->rnmip &= ~(1 << irq);
799         cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
800     }
801 
802     if (release_lock) {
803         bql_unlock();
804     }
805 }
806 
807 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
808 {
809     CPURISCVState *env = &cpu->env;
810     if (env->miclaim & interrupts) {
811         return -1;
812     } else {
813         env->miclaim |= interrupts;
814         return 0;
815     }
816 }
817 
818 void riscv_cpu_interrupt(CPURISCVState *env)
819 {
820     uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
821     CPUState *cs = env_cpu(env);
822 
823     BQL_LOCK_GUARD();
824 
825     if (env->virt_enabled) {
826         gein = get_field(env->hstatus, HSTATUS_VGEIN);
827         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
828         irqf = env->hvien & env->hvip & env->vsie;
829     } else {
830         irqf = env->mvien & env->mvip & env->sie;
831     }
832 
833     vstip = env->vstime_irq ? MIP_VSTIP : 0;
834 
835     if (env->mip | vsgein | vstip | irqf) {
836         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
837     } else {
838         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
839     }
840 }
841 
842 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
843 {
844     uint64_t old = env->mip;
845 
846     /* No need to update mip for VSTIP */
847     mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
848 
849     BQL_LOCK_GUARD();
850 
851     env->mip = (env->mip & ~mask) | (value & mask);
852 
853     riscv_cpu_interrupt(env);
854 
855     return old;
856 }
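
/*
 * Usage sketch (illustrative; assumes a timer model outside this file):
 *
 *     riscv_cpu_update_mip(env, MIP_MTIP, level ? MIP_MTIP : 0);
 *
 * sets or clears MIP.MTIP under the BQL and lets riscv_cpu_interrupt()
 * re-evaluate whether CPU_INTERRUPT_HARD should be raised.
 */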
857 
858 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
859                              void *arg)
860 {
861     env->rdtime_fn = fn;
862     env->rdtime_fn_arg = arg;
863 }
864 
865 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
866                                    int (*rmw_fn)(void *arg,
867                                                  target_ulong reg,
868                                                  target_ulong *val,
869                                                  target_ulong new_val,
870                                                  target_ulong write_mask),
871                                    void *rmw_fn_arg)
872 {
873     if (priv <= PRV_M) {
874         env->aia_ireg_rmw_fn[priv] = rmw_fn;
875         env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
876     }
877 }
878 
879 static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
880                              bool virt)
881 {
882     uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
883 
884     assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
885 
886     if (ctl & freeze_mask) {
887         env->sctrstatus |= SCTRSTATUS_FROZEN;
888     }
889 }
890 
891 void riscv_ctr_clear(CPURISCVState *env)
892 {
893     memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
894     memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
895     memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
896 }
897 
898 static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
899 {
900     switch (priv) {
901     case PRV_M:
902         return MCTRCTL_M;
903     case PRV_S:
904         if (virt) {
905             return XCTRCTL_S;
906         }
907         return XCTRCTL_S;
908     case PRV_U:
909         if (virt) {
910             return XCTRCTL_U;
911         }
912         return XCTRCTL_U;
913     }
914 
915     g_assert_not_reached();
916 }
917 
918 static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
919                                       bool virt)
920 {
921     switch (priv) {
922     case PRV_M:
923         return env->mctrctl;
924     case PRV_S:
925     case PRV_U:
926         if (virt) {
927             return env->vsctrctl;
928         }
929         return env->mctrctl;
930     }
931 
932     g_assert_not_reached();
933 }
934 
935 /*
936  * This function assumes that src privilege and target privilege are not the
937  * same and that src privilege is less than target privilege. This includes
938  * the virtual state as well.
939  */
940 static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
941                                 bool src_virt)
942 {
943     target_long tgt_prv = env->priv;
944     bool res = true;
945 
946     /*
947      * VS and U mode are the same in terms of xTE bits required to record an
948      * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
949      * Requirements. This changes VS to U to simplify the logic a bit.
950      */
951     if (src_virt && src_prv == PRV_S) {
952         src_prv = PRV_U;
953     } else if (env->virt_enabled && tgt_prv == PRV_S) {
954         tgt_prv = PRV_U;
955     }
956 
957     /* VU mode is an outlier here. */
958     if (src_virt && src_prv == PRV_U) {
959         res &= !!(env->vsctrctl & XCTRCTL_STE);
960     }
961 
962     switch (src_prv) {
963     case PRV_U:
964         if (tgt_prv == PRV_U) {
965             break;
966         }
967         res &= !!(env->mctrctl & XCTRCTL_STE);
968         /* fall-through */
969     case PRV_S:
970         if (tgt_prv == PRV_S) {
971             break;
972         }
973         res &= !!(env->mctrctl & MCTRCTL_MTE);
974         /* fall-through */
975     case PRV_M:
976         break;
977     }
978 
979     return res;
980 }
981 
982 /*
983  * Special cases for traps and trap returns:
984  *
985  * 1- Traps, and trap returns, between enabled modes are recorded as normal.
986  * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
987  * enabled mode back to an inhibited mode, are partially recorded.  In such
988  * cases, the PC from the inhibited mode (source PC for traps, and target PC
989  * for trap returns) is 0.
990  *
991  * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
992  * Traps from an enabled mode to an inhibited mode, known as external traps,
993  * receive special handling.
994  * By default external traps are not recorded, but a handshake mechanism exists
995  * to allow partial recording.  Software running in the target mode of the trap
996  * can opt-in to allowing CTR to record traps into that mode even when the mode
997  * is inhibited.  The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
998  * respectively, to opt-in. When an external trap occurs and xTE=1, where
999  * x is the target privilege mode of the trap, CTR records the trap. In such
1000  * cases, the target PC is 0.
1001  */
1002 /*
1003  * CTR arrays are implemented as circular buffers and a new entry is stored
1004  * at sctrstatus.WRPTR, but they are presented to software as moving circular
1005  * buffers. This means software gets the illusion that whenever a new entry
1006  * is added the whole buffer is moved by one place, with the new entry placed
1007  * at the start at idx 0 and the older entries following it.
1008  *
1009  * Depth = 16.
1010  *
1011  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1012  * WRPTR                                   W
1013  * entry   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8
1014  *
1015  * When a new entry is added:
1016  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1017  * WRPTR                                       W
1018  * entry   8   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9
1019  *
1020  * entry here denotes the logical entry number that software can access
1021  * using the ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
1022  * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
1023  * buffer[7]. Here is how we convert an entry to a buffer idx:
1024  *
1025  *    entry = isel - CTR_ENTRIES_FIRST;
1026  *    idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
1027  */
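
/*
 * Illustrative sketch only, not part of this file: the conversion above,
 * from an xiselect value to a physical buffer index, written out as a
 * helper. It assumes CTR_ENTRIES_FIRST (referenced in the comment) names
 * the first xiselect value of the CTR entry range; the helper name is
 * hypothetical.
 */
static inline uint64_t ctr_isel_to_buffer_idx_example(CPURISCVState *env,
                                                      uint64_t isel)
{
    uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t wrptr = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    uint64_t entry = isel - CTR_ENTRIES_FIRST;

    return (wrptr - entry - 1) & (depth - 1);
}
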
1028 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
1029     enum CTRType type, target_ulong src_priv, bool src_virt)
1030 {
1031     bool tgt_virt = env->virt_enabled;
1032     uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
1033     uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
1034     uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
1035     uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
1036     uint64_t depth, head;
1037     bool ext_trap = false;
1038 
1039     /*
1040      * Return immediately if both target and src recording is disabled or if
1041      * CTR is in frozen state.
1042      */
1043     if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
1044         env->sctrstatus & SCTRSTATUS_FROZEN) {
1045         return;
1046     }
1047 
1048     /*
1049      * With RAS Emul enabled, only allow indirect calls, direct calls,
1050      * function returns and co-routine swap types.
1051      */
1052     if (tgt_ctrl & XCTRCTL_RASEMU &&
1053         type != CTRDATA_TYPE_INDIRECT_CALL &&
1054         type != CTRDATA_TYPE_DIRECT_CALL &&
1055         type != CTRDATA_TYPE_RETURN &&
1056         type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1057         return;
1058     }
1059 
1060     if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
1061         /* Case 2 for traps. */
1062         if (!(src_ctrl & src_mask)) {
1063             src = 0;
1064         } else if (!(tgt_ctrl & tgt_mask)) {
1065             /* Check if target priv-mode has allowed external trap recording. */
1066             if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
1067                 return;
1068             }
1069 
1070             ext_trap = true;
1071             dst = 0;
1072         }
1073     } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
1074         /*
1075          * Case 3 for trap returns.  Trap returns from inhibited mode are not
1076          * recorded.
1077          */
1078         if (!(src_ctrl & src_mask)) {
1079             return;
1080         }
1081 
1082         /* Case 2 for trap returns. */
1083         if (!(tgt_ctrl & tgt_mask)) {
1084             dst = 0;
1085         }
1086     }
1087 
1088     /* Ignore filters in case of RASEMU mode or External trap. */
1089     if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
1090         /*
1091          * Check if the specific type is inhibited. The not-taken branch filter
1092          * is an enable bit and needs to be checked separately.
1093          */
1094         bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
1095         if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
1096             (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
1097             return;
1098         }
1099     }
1100 
1101     head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
1102 
1103     depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
1104     if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
1105         head = (head - 1) & (depth - 1);
1106 
1107         env->ctr_src[head] &= ~CTRSOURCE_VALID;
1108         env->sctrstatus =
1109             set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1110         return;
1111     }
1112 
1113     /* In case of Co-routine SWAP we overwrite latest entry. */
1114     if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1115         head = (head - 1) & (depth - 1);
1116     }
1117 
1118     env->ctr_src[head] = src | CTRSOURCE_VALID;
1119     env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
1120     env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
1121 
1122     head = (head + 1) & (depth - 1);
1123 
1124     env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1125 }
1126 
1127 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
1128 {
1129     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
1130 
1131     if (newpriv != env->priv || env->virt_enabled != virt_en) {
1132         if (icount_enabled()) {
1133             riscv_itrigger_update_priv(env);
1134         }
1135 
1136         riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
1137     }
1138 
1139     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
1140     env->priv = newpriv;
1141     env->xl = cpu_recompute_xl(env);
1142 
1143     /*
1144      * Clear the load reservation - otherwise a reservation placed in one
1145      * context/process can be used by another, resulting in an SC succeeding
1146      * incorrectly. Version 2.2 of the ISA specification explicitly requires
1147      * this behaviour, while later revisions say that the kernel "should" use
1148      * an SC instruction to force the yielding of a load reservation on a
1149      * preemptive context switch. As a result, do both.
1150      */
1151     env->load_res = -1;
1152 
1153     if (riscv_has_ext(env, RVH)) {
1154         /* Flush the TLB on all virt mode changes. */
1155         if (env->virt_enabled != virt_en) {
1156             tlb_flush(env_cpu(env));
1157         }
1158 
1159         env->virt_enabled = virt_en;
1160         if (virt_en) {
1161             /*
1162              * The guest external interrupts from an interrupt controller are
1163              * delivered only when the Guest/VM is running (i.e. V=1). This
1164              * means any guest external interrupt which is triggered while the
1165              * Guest/VM is not running (i.e. V=0) will be missed by QEMU,
1166              * resulting in a guest with sluggish response to serial console
1167              * input and other I/O events.
1168              *
1169              * To solve this, we check and inject interrupt after setting V=1.
1170              */
1171             riscv_cpu_update_mip(env, 0, 0);
1172         }
1173     }
1174 }
1175 
1176 /*
1177  * get_physical_address_pmp - check PMP permission for this physical address
1178  *
1179  * Match the PMP region and check permission for this physical address and its
1180  * TLB page. Returns 0 if the permission checking was successful.
1181  *
1182  * @env: CPURISCVState
1183  * @prot: The returned protection attributes
1184  * @addr: The physical address whose permission is to be checked
1185  * @access_type: The type of MMU access
1186  * @mode: Indicates current privilege level.
1187  */
1188 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
1189                                     int size, MMUAccessType access_type,
1190                                     int mode)
1191 {
1192     pmp_priv_t pmp_priv;
1193     bool pmp_has_privs;
1194 
1195     if (!riscv_cpu_cfg(env)->pmp) {
1196         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1197         return TRANSLATE_SUCCESS;
1198     }
1199 
1200     pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
1201                                        &pmp_priv, mode);
1202     if (!pmp_has_privs) {
1203         *prot = 0;
1204         return TRANSLATE_PMP_FAIL;
1205     }
1206 
1207     *prot = pmp_priv_to_page_prot(pmp_priv);
1208 
1209     return TRANSLATE_SUCCESS;
1210 }
1211 
1212 /* Returns 'true' if a svukte address check is needed */
1213 static bool do_svukte_check(CPURISCVState *env, bool first_stage,
1214                              int mode, bool virt)
1215 {
1216     /* Svukte extension depends on Sv39. */
1217     if (!env_archcpu(env)->cfg.ext_svukte ||
1218         !first_stage ||
1219         VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
1220         return false;
1221     }
1222 
1223     /*
1224      * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
1225      * executing HLV/HLVX/HSV in U-mode.
1226      * For other cases, check senvcfg.UKTE.
1227      */
1228     if (env->priv == PRV_U && !env->virt_enabled && virt) {
1229         if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
1230             return false;
1231         }
1232     } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
1233         return false;
1234     }
1235 
1236     /*
1237      * Svukte extension is qualified only in U or VU-mode.
1238      *
1239      * Effective mode can be switched to U or VU-mode by:
1240      *   - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
1241      *   - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
1242      *   - U-mode.
1243      *   - VU-mode.
1244      *   - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
1245      */
1246     if (mode != PRV_U) {
1247         return false;
1248     }
1249 
1250     return true;
1251 }
1252 
1253 static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
1254 {
1255     /* svukte extension excludes RV32 */
1256     uint32_t sxlen = 32 * riscv_cpu_sxl(env);
1257     uint64_t high_bit = addr & (1ULL << (sxlen - 1));
1258     return !high_bit;
1259 }
1260 
1261 /*
1262  * get_physical_address - get the physical address for this virtual address
1263  *
1264  * Do a page table walk to obtain the physical address corresponding to a
1265  * virtual address. Returns 0 if the translation was successful
1266  *
1267  * Adapted from Spike's mmu_t::translate and mmu_t::walk
1268  *
1269  * @env: CPURISCVState
1270  * @physical: This will be set to the calculated physical address
1271  * @prot: The returned protection attributes
1272  * @addr: The virtual address or guest physical address to be translated
1273  * @fault_pte_addr: If not NULL, this will be set to fault pte address
1274  *                  when an error occurs on pte address translation.
1275  *                  This will already be shifted to match htval.
1276  * @access_type: The type of MMU access
1277  * @mmu_idx: Indicates current privilege level
1278  * @first_stage: Are we in first stage translation?
1279  *               Second stage is used for hypervisor guest translation
1280  * @two_stage: Are we going to perform two stage translation
1281  * @is_debug: Is this access from a debugger or the monitor?
1282  */
1283 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1284                                 int *ret_prot, vaddr addr,
1285                                 target_ulong *fault_pte_addr,
1286                                 int access_type, int mmu_idx,
1287                                 bool first_stage, bool two_stage,
1288                                 bool is_debug, bool is_probe)
1289 {
1290     /*
1291      * NOTE: the env->pc value visible here will not be
1292      * correct, but the value visible to the exception handler
1293      * (riscv_cpu_do_interrupt) is correct
1294      */
1295     MemTxResult res;
1296     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1297     int mode = mmuidx_priv(mmu_idx);
1298     bool virt = mmuidx_2stage(mmu_idx);
1299     bool use_background = false;
1300     hwaddr ppn;
1301     int napot_bits = 0;
1302     target_ulong napot_mask;
1303     bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1304     bool sstack_page = false;
1305 
1306     if (do_svukte_check(env, first_stage, mode, virt) &&
1307         !check_svukte_addr(env, addr)) {
1308         return TRANSLATE_FAIL;
1309     }
1310 
1311     /*
1312      * Check if we should use the background registers for the two
1313      * stage translation. We don't need to check if we actually need
1314      * two stage translation as that happened before this function
1315      * was called. Background registers will be used if the guest has
1316      * forced a two stage translation to be on (in HS or M mode).
1317      */
1318     if (!env->virt_enabled && two_stage) {
1319         use_background = true;
1320     }
1321 
1322     if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1323         *physical = addr;
1324         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1325         return TRANSLATE_SUCCESS;
1326     }
1327 
1328     *ret_prot = 0;
1329 
1330     hwaddr base;
1331     int levels, ptidxbits, ptesize, vm, widened;
1332 
1333     if (first_stage == true) {
1334         if (use_background) {
1335             if (riscv_cpu_mxl(env) == MXL_RV32) {
1336                 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1337                 vm = get_field(env->vsatp, SATP32_MODE);
1338             } else {
1339                 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1340                 vm = get_field(env->vsatp, SATP64_MODE);
1341             }
1342         } else {
1343             if (riscv_cpu_mxl(env) == MXL_RV32) {
1344                 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1345                 vm = get_field(env->satp, SATP32_MODE);
1346             } else {
1347                 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1348                 vm = get_field(env->satp, SATP64_MODE);
1349             }
1350         }
1351         widened = 0;
1352     } else {
1353         if (riscv_cpu_mxl(env) == MXL_RV32) {
1354             base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1355             vm = get_field(env->hgatp, SATP32_MODE);
1356         } else {
1357             base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1358             vm = get_field(env->hgatp, SATP64_MODE);
1359         }
1360         widened = 2;
1361     }
1362 
1363     switch (vm) {
1364     case VM_1_10_SV32:
1365       levels = 2; ptidxbits = 10; ptesize = 4; break;
1366     case VM_1_10_SV39:
1367       levels = 3; ptidxbits = 9; ptesize = 8; break;
1368     case VM_1_10_SV48:
1369       levels = 4; ptidxbits = 9; ptesize = 8; break;
1370     case VM_1_10_SV57:
1371       levels = 5; ptidxbits = 9; ptesize = 8; break;
1372     case VM_1_10_MBARE:
1373         *physical = addr;
1374         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1375         return TRANSLATE_SUCCESS;
1376     default:
1377       g_assert_not_reached();
1378     }
1379 
1380     CPUState *cs = env_cpu(env);
1381     int va_bits = PGSHIFT + levels * ptidxbits + widened;
1382     int sxlen = 16 << riscv_cpu_sxl(env);
1383     int sxlen_bytes = sxlen / 8;
1384 
1385     if (first_stage == true) {
1386         target_ulong mask, masked_msbs;
1387 
1388         if (sxlen > (va_bits - 1)) {
1389             mask = (1L << (sxlen - (va_bits - 1))) - 1;
1390         } else {
1391             mask = 0;
1392         }
1393         masked_msbs = (addr >> (va_bits - 1)) & mask;
1394 
1395         if (masked_msbs != 0 && masked_msbs != mask) {
1396             return TRANSLATE_FAIL;
1397         }
1398     } else {
1399         if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1400             return TRANSLATE_FAIL;
1401         }
1402     }
1403 
1404     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1405     bool svade = riscv_cpu_cfg(env)->ext_svade;
1406     bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1407     bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1408 
1409     if (first_stage && two_stage && env->virt_enabled) {
1410         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1411         adue = adue && (env->henvcfg & HENVCFG_ADUE);
1412     }
1413 
1414     int ptshift = (levels - 1) * ptidxbits;
1415     target_ulong pte;
1416     hwaddr pte_addr;
1417     int i;
1418 
1419  restart:
1420     for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1421         target_ulong idx;
1422         if (i == 0) {
1423             idx = (addr >> (PGSHIFT + ptshift)) &
1424                            ((1 << (ptidxbits + widened)) - 1);
1425         } else {
1426             idx = (addr >> (PGSHIFT + ptshift)) &
1427                            ((1 << ptidxbits) - 1);
1428         }
1429 
1430         /* check that physical address of PTE is legal */
1431 
1432         if (two_stage && first_stage) {
1433             int vbase_prot;
1434             hwaddr vbase;
1435 
1436             /* Do the second stage translation on the base PTE address. */
1437             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1438                                                  base, NULL, MMU_DATA_LOAD,
1439                                                  MMUIdx_U, false, true,
1440                                                  is_debug, false);
1441 
1442             if (vbase_ret != TRANSLATE_SUCCESS) {
1443                 if (fault_pte_addr) {
1444                     *fault_pte_addr = (base + idx * ptesize) >> 2;
1445                 }
1446                 return TRANSLATE_G_STAGE_FAIL;
1447             }
1448 
1449             pte_addr = vbase + idx * ptesize;
1450         } else {
1451             pte_addr = base + idx * ptesize;
1452         }
1453 
1454         int pmp_prot;
1455         int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1456                                                sxlen_bytes,
1457                                                MMU_DATA_LOAD, PRV_S);
1458         if (pmp_ret != TRANSLATE_SUCCESS) {
1459             return TRANSLATE_PMP_FAIL;
1460         }
1461 
1462         if (riscv_cpu_mxl(env) == MXL_RV32) {
1463             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1464         } else {
1465             pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1466         }
1467 
1468         if (res != MEMTX_OK) {
1469             return TRANSLATE_FAIL;
1470         }
1471 
1472         if (riscv_cpu_sxl(env) == MXL_RV32) {
1473             ppn = pte >> PTE_PPN_SHIFT;
1474         } else {
1475             if (pte & PTE_RESERVED) {
1476                 qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
1477                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1478                               __func__, pte_addr, pte);
1479                 return TRANSLATE_FAIL;
1480             }
1481 
1482             if (!pbmte && (pte & PTE_PBMT)) {
1483                 /* Reserved without Svpbmt. */
1484                 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1485                               "and Svpbmt extension is disabled: "
1486                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1487                               __func__, pte_addr, pte);
1488                 return TRANSLATE_FAIL;
1489             }
1490 
1491             if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1492                 /* Reserved without Svnapot extension */
1493                 qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
1494                               "and Svnapot extension is disabled: "
1495                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1496                               __func__, pte_addr, pte);
1497                 return TRANSLATE_FAIL;
1498             }
1499 
1500             ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1501         }
1502 
1503         if (!(pte & PTE_V)) {
1504             /* Invalid PTE */
1505             return TRANSLATE_FAIL;
1506         }
1507 
1508         if (pte & (PTE_R | PTE_W | PTE_X)) {
1509             goto leaf;
1510         }
1511 
1512         if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1513             /* D, A, and U bits are reserved in non-leaf/inner PTEs */
1514             qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
1515                           "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1516                           __func__, pte_addr, pte);
1517             return TRANSLATE_FAIL;
1518         }
1519         /* Inner PTE, continue walking */
1520         base = ppn << PGSHIFT;
1521     }
1522 
1523     /* No leaf pte at any translation level. */
1524     return TRANSLATE_FAIL;
1525 
1526  leaf:
1527     if (ppn & ((1ULL << ptshift) - 1)) {
1528         /* Misaligned PPN */
1529         qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE are misaligned: "
1530                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1531                       __func__, pte_addr, pte);
1532         return TRANSLATE_FAIL;
1533     }
1534     if (!pbmte && (pte & PTE_PBMT)) {
1535         /* Reserved without Svpbmt. */
1536         qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1537                       "and Svpbmt extension is disabled: "
1538                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1539                       __func__, pte_addr, pte);
1540         return TRANSLATE_FAIL;
1541     }
1542 
1543     target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1544     /* Check for reserved combinations of RWX flags. */
1545     switch (rwx) {
1546     case PTE_W | PTE_X:
1547         return TRANSLATE_FAIL;
1548     case PTE_W:
1549         /* if bcfi is enabled, PTE_W alone is a shadow stack page, not reserved */
1550         if (cpu_get_bcfien(env) && first_stage) {
1551             sstack_page = true;
1552             /*
1553              * if ss index, read and write are allowed, else if not a probe
1554              * then only read is allowed
1555              */
1556             rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 :  PTE_R);
1557             break;
1558         }
1559         return TRANSLATE_FAIL;
1560     case PTE_R:
1561         /*
1562          * no matter what the `access_type` is, shadow stack accesses to
1563          * readonly memory are always store page faults. During unwind, loads
1564          * will be promoted to store faults.
1565          */
1566         if (is_sstack_idx) {
1567             return TRANSLATE_FAIL;
1568         }
1569         break;
1570     }
1571 
1572     int prot = 0;
1573     if (rwx & PTE_R) {
1574         prot |= PAGE_READ;
1575     }
1576     if (rwx & PTE_W) {
1577         prot |= PAGE_WRITE;
1578     }
1579     if (rwx & PTE_X) {
1580         bool mxr = false;
1581 
1582         /*
1583          * Use mstatus for first stage or for the second stage without
1584          * virt_enabled (MPRV+MPV)
1585          */
1586         if (first_stage || !env->virt_enabled) {
1587             mxr = get_field(env->mstatus, MSTATUS_MXR);
1588         }
1589 
1590         /* MPRV+MPV case, check VSSTATUS */
1591         if (first_stage && two_stage && !env->virt_enabled) {
1592             mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1593         }
1594 
1595         /*
1596          * Setting MXR at HS-level overrides both VS-stage and G-stage
1597          * execute-only permissions
1598          */
1599         if (env->virt_enabled) {
1600             mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1601         }
1602 
1603         if (mxr) {
1604             prot |= PAGE_READ;
1605         }
1606         prot |= PAGE_EXEC;
1607     }
1608 
1609     if (pte & PTE_U) {
1610         if (mode != PRV_U) {
1611             if (!mmuidx_sum(mmu_idx)) {
1612                 return TRANSLATE_FAIL;
1613             }
1614             /* SUM allows only read+write, not execute. */
1615             prot &= PAGE_READ | PAGE_WRITE;
1616         }
1617     } else if (mode != PRV_S) {
1618         /* Supervisor-only PTE, but the access is not from S-mode */
1619         return TRANSLATE_FAIL;
1620     }
1621 
1622     if (!((prot >> access_type) & 1)) {
1623         /*
1624          * Access check failed. For shadow stack pages the failure is
1625          * reported as an access fault rather than a page fault.
1626          */
1627         return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1628     }
1629 
1630     target_ulong updated_pte = pte;
1631 
1632     /*
1633      * If ADUE is enabled, set accessed and dirty bits.
1634      * Otherwise raise an exception if necessary.
1635      */
1636     if (adue) {
1637         updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1638     } else if (!(pte & PTE_A) ||
1639                (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1640         return TRANSLATE_FAIL;
1641     }
1642 
1643     /* Page table updates need to be atomic with MTTCG enabled */
1644     if (updated_pte != pte && !is_debug) {
1645         if (!adue) {
1646             return TRANSLATE_FAIL;
1647         }
1648 
1649         /*
1650          * - if accessed or dirty bits need updating, and the PTE is
1651          *   in RAM, then we do so atomically with a compare and swap.
1652          * - if the PTE is in IO space or ROM, then it can't be updated
1653          *   and we return TRANSLATE_FAIL.
1654          * - if the PTE changed by the time we went to update it, then
1655          *   it is no longer valid and we must re-walk the page table.
1656          */
1657         MemoryRegion *mr;
1658         hwaddr l = sxlen_bytes, addr1;
1659         mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1660                                      false, MEMTXATTRS_UNSPECIFIED);
1661         if (memory_region_is_ram(mr)) {
1662             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1663             target_ulong old_pte;
1664             if (riscv_cpu_sxl(env) == MXL_RV32) {
1665                 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
1666             } else {
1667                 old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
1668             }
1669             if (old_pte != pte) {
1670                 goto restart;
1671             }
1672             pte = updated_pte;
1673         } else {
1674             /*
1675              * Misconfigured PTE in ROM (A/D bits are not pre-set) or
1676              * PTE is in IO space and can't be updated atomically.
1677              */
1678             return TRANSLATE_FAIL;
1679         }
1680     }
1681 
1682     /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1683     target_ulong vpn = addr >> PGSHIFT;
1684 
1685     if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1686         napot_bits = ctzl(ppn) + 1;
1687         if ((i != (levels - 1)) || (napot_bits != 4)) {
1688             return TRANSLATE_FAIL;
1689         }
1690     }
1691 
1692     napot_mask = (1 << napot_bits) - 1;
1693     *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1694                   (vpn & (((target_ulong)1 << ptshift) - 1))
1695                  ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1696 
1697     /*
1698      * Remove write permission unless this is a store, or the page is
1699      * already dirty, so that we TLB miss on later writes to update
1700      * the dirty bit.
1701      */
1702     if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1703         prot &= ~PAGE_WRITE;
1704     }
1705     *ret_prot = prot;
1706 
1707     return TRANSLATE_SUCCESS;
1708 }
1709 
1710 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1711                                 MMUAccessType access_type, bool pmp_violation,
1712                                 bool first_stage, bool two_stage,
1713                                 bool two_stage_indirect)
1714 {
1715     CPUState *cs = env_cpu(env);
1716 
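         /*
          * Pick the exception cause from the access type, distinguishing PMP
          * violations (access faults) from first-stage and guest (G-stage)
          * page faults.
          */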
1717     switch (access_type) {
1718     case MMU_INST_FETCH:
1719         if (pmp_violation) {
1720             cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1721         } else if (env->virt_enabled && !first_stage) {
1722             cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1723         } else {
1724             cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1725         }
1726         break;
1727     case MMU_DATA_LOAD:
1728         if (pmp_violation) {
1729             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1730         } else if (two_stage && !first_stage) {
1731             cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1732         } else {
1733             cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1734         }
1735         break;
1736     case MMU_DATA_STORE:
1737         if (pmp_violation) {
1738             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1739         } else if (two_stage && !first_stage) {
1740             cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1741         } else {
1742             cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1743         }
1744         break;
1745     default:
1746         g_assert_not_reached();
1747     }
1748     env->badaddr = address;
1749     env->two_stage_lookup = two_stage;
1750     env->two_stage_indirect_lookup = two_stage_indirect;
1751 }
1752 
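     /*
      * Debug (gdbstub/monitor) physical address lookup: walk the current MMU
      * configuration, including the G-stage when virtualization is enabled,
      * without updating PTE A/D bits.
      */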
1753 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1754 {
1755     RISCVCPU *cpu = RISCV_CPU(cs);
1756     CPURISCVState *env = &cpu->env;
1757     hwaddr phys_addr;
1758     int prot;
1759     int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1760 
1761     if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1762                              true, env->virt_enabled, true, false)) {
1763         return -1;
1764     }
1765 
1766     if (env->virt_enabled) {
1767         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1768                                  0, MMUIdx_U, false, true, true, false)) {
1769             return -1;
1770         }
1771     }
1772 
1773     return phys_addr & TARGET_PAGE_MASK;
1774 }
1775 
1776 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1777                                      vaddr addr, unsigned size,
1778                                      MMUAccessType access_type,
1779                                      int mmu_idx, MemTxAttrs attrs,
1780                                      MemTxResult response, uintptr_t retaddr)
1781 {
1782     RISCVCPU *cpu = RISCV_CPU(cs);
1783     CPURISCVState *env = &cpu->env;
1784 
1785     if (access_type == MMU_DATA_STORE) {
1786         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1787     } else if (access_type == MMU_DATA_LOAD) {
1788         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1789     } else {
1790         cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1791     }
1792 
1793     env->badaddr = addr;
1794     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1795     env->two_stage_indirect_lookup = false;
1796     cpu_loop_exit_restore(cs, retaddr);
1797 }
1798 
1799 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1800                                    MMUAccessType access_type, int mmu_idx,
1801                                    uintptr_t retaddr)
1802 {
1803     RISCVCPU *cpu = RISCV_CPU(cs);
1804     CPURISCVState *env = &cpu->env;
1805     switch (access_type) {
1806     case MMU_INST_FETCH:
1807         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1808         break;
1809     case MMU_DATA_LOAD:
1810         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1811         /* shadow stack misaligned accesses are access faults */
1812         if (mmu_idx & MMU_IDX_SS_WRITE) {
1813             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1814         }
1815         break;
1816     case MMU_DATA_STORE:
1817         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1818         /* shadow stack misaligned accesses are access faults */
1819         if (mmu_idx & MMU_IDX_SS_WRITE) {
1820             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1821         }
1822         break;
1823     default:
1824         g_assert_not_reached();
1825     }
1826     env->badaddr = addr;
1827     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1828     env->two_stage_indirect_lookup = false;
1829     cpu_loop_exit_restore(cs, retaddr);
1830 }
1831 
1832 
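     /*
      * Account a TLB fill against the corresponding PMU TLB-miss event for
      * the given access type.
      */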
1833 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1834 {
1835     enum riscv_pmu_event_idx pmu_event_type;
1836 
1837     switch (access_type) {
1838     case MMU_INST_FETCH:
1839         pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1840         break;
1841     case MMU_DATA_LOAD:
1842         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1843         break;
1844     case MMU_DATA_STORE:
1845         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1846         break;
1847     default:
1848         return;
1849     }
1850 
1851     riscv_pmu_incr_ctr(cpu, pmu_event_type);
1852 }
1853 
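     /*
      * TCG TLB-fill hook: translate 'address' through the one- or two-stage
      * MMU and PMP checks, insert the mapping into the TLB on success, or
      * raise the appropriate fault otherwise.
      */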
1854 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1855                         MMUAccessType access_type, int mmu_idx,
1856                         bool probe, uintptr_t retaddr)
1857 {
1858     RISCVCPU *cpu = RISCV_CPU(cs);
1859     CPURISCVState *env = &cpu->env;
1860     vaddr im_address;
1861     hwaddr pa = 0;
1862     int prot, prot2, prot_pmp;
1863     bool pmp_violation = false;
1864     bool first_stage_error = true;
1865     bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1866     bool two_stage_indirect_error = false;
1867     int ret = TRANSLATE_FAIL;
1868     int mode = mmuidx_priv(mmu_idx);
1869     /* default TLB page size */
1870     hwaddr tlb_size = TARGET_PAGE_SIZE;
1871 
1872     env->guest_phys_fault_addr = 0;
1873 
1874     qemu_log_mask(CPU_LOG_MMU, "%s addr %" VADDR_PRIx " rw %d mmu_idx %d\n",
1875                   __func__, address, access_type, mmu_idx);
1876 
1877     pmu_tlb_fill_incr_ctr(cpu, access_type);
1878     if (two_stage_lookup) {
1879         /* Two stage lookup */
1880         ret = get_physical_address(env, &pa, &prot, address,
1881                                    &env->guest_phys_fault_addr, access_type,
1882                                    mmu_idx, true, true, false, probe);
1883 
1884         /*
1885          * A G-stage exception may be triggered during the two-stage lookup.
1886          * And the env->guest_phys_fault_addr has already been set in
1887          * get_physical_address().
1888          */
1889         if (ret == TRANSLATE_G_STAGE_FAIL) {
1890             first_stage_error = false;
1891             two_stage_indirect_error = true;
1892         }
1893 
1894         qemu_log_mask(CPU_LOG_MMU,
1895                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1896                       HWADDR_FMT_plx " prot %d\n",
1897                       __func__, address, ret, pa, prot);
1898 
1899         if (ret == TRANSLATE_SUCCESS) {
1900             /* Second stage lookup */
1901             im_address = pa;
1902 
1903             ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1904                                        access_type, MMUIdx_U, false, true,
1905                                        false, probe);
1906 
1907             qemu_log_mask(CPU_LOG_MMU,
1908                           "%s 2nd-stage address=%" VADDR_PRIx
1909                           " ret %d physical "
1910                           HWADDR_FMT_plx " prot %d\n",
1911                           __func__, im_address, ret, pa, prot2);
1912 
1913             prot &= prot2;
1914 
1915             if (ret == TRANSLATE_SUCCESS) {
1916                 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1917                                                size, access_type, mode);
1918                 tlb_size = pmp_get_tlb_size(env, pa);
1919 
1920                 qemu_log_mask(CPU_LOG_MMU,
1921                               "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1922                               " %d tlb_size %" HWADDR_PRIu "\n",
1923                               __func__, pa, ret, prot_pmp, tlb_size);
1924 
1925                 prot &= prot_pmp;
1926             } else {
1927                 /*
1928                  * Guest physical address translation failed; this is an
1929                  * HS-level exception.
1930                  */
1931                 first_stage_error = false;
1932                 if (ret != TRANSLATE_PMP_FAIL) {
1933                     env->guest_phys_fault_addr = (im_address |
1934                                                   (address &
1935                                                    (TARGET_PAGE_SIZE - 1))) >> 2;
1936                 }
1937             }
1938         }
1939     } else {
1940         /* Single stage lookup */
1941         ret = get_physical_address(env, &pa, &prot, address, NULL,
1942                                    access_type, mmu_idx, true, false, false,
1943                                    probe);
1944 
1945         qemu_log_mask(CPU_LOG_MMU,
1946                       "%s address=%" VADDR_PRIx " ret %d physical "
1947                       HWADDR_FMT_plx " prot %d\n",
1948                       __func__, address, ret, pa, prot);
1949 
1950         if (ret == TRANSLATE_SUCCESS) {
1951             ret = get_physical_address_pmp(env, &prot_pmp, pa,
1952                                            size, access_type, mode);
1953             tlb_size = pmp_get_tlb_size(env, pa);
1954 
1955             qemu_log_mask(CPU_LOG_MMU,
1956                           "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1957                           " %d tlb_size %" HWADDR_PRIu "\n",
1958                           __func__, pa, ret, prot_pmp, tlb_size);
1959 
1960             prot &= prot_pmp;
1961         }
1962     }
1963 
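         /*
          * A PMP violation (or a failed shadow stack access check) is
          * reported as an access fault rather than a page fault.
          */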
1964     if (ret == TRANSLATE_PMP_FAIL) {
1965         pmp_violation = true;
1966     }
1967 
1968     if (ret == TRANSLATE_SUCCESS) {
1969         tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1970                      prot, mmu_idx, tlb_size);
1971         return true;
1972     } else if (probe) {
1973         return false;
1974     } else {
1975         int wp_access = 0;
1976 
1977         if (access_type == MMU_DATA_LOAD) {
1978             wp_access |= BP_MEM_READ;
1979         } else if (access_type == MMU_DATA_STORE) {
1980             wp_access |= BP_MEM_WRITE;
1981         }
1982 
1983         /*
1984          * If a watchpoint isn't found for 'address' this will
1985          * be a no-op and we'll resume the mmu_exception path.
1986          * Otherwise we'll throw a debug exception and execution
1987          * will continue elsewhere.
1988          */
1989         cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
1990                              wp_access, retaddr);
1991 
1992         raise_mmu_exception(env, address, access_type, pmp_violation,
1993                             first_stage_error, two_stage_lookup,
1994                             two_stage_indirect_error);
1995         cpu_loop_exit_restore(cs, retaddr);
1996     }
1997 
1998     return true;
1999 }
2000 
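     /*
      * Build the transformed instruction reported in htinst/mtinst for a
      * trapped load or store, per the RISC-V privileged specification.
      */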
2001 static target_ulong riscv_transformed_insn(CPURISCVState *env,
2002                                            target_ulong insn,
2003                                            target_ulong taddr)
2004 {
2005     target_ulong xinsn = 0;
2006     target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
2007 
2008     /*
2009      * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
2010      * be uncompressed. Quadrant 1 of the RVC instruction space need not be
2011      * transformed because those instructions won't generate any load/store
2012      * trap.
2013      */
2014 
2015     if ((insn & 0x3) != 0x3) {
2016         /* Transform 16bit instruction into 32bit instruction */
2017         switch (GET_C_OP(insn)) {
2018         case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
2019             switch (GET_C_FUNC(insn)) {
2020             case OPC_RISC_C_FUNC_FLD_LQ:
2021                 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
2022                     xinsn = OPC_RISC_FLD;
2023                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2024                     access_rs1 = GET_C_RS1S(insn);
2025                     access_imm = GET_C_LD_IMM(insn);
2026                     access_size = 8;
2027                 }
2028                 break;
2029             case OPC_RISC_C_FUNC_LW: /* C.LW */
2030                 xinsn = OPC_RISC_LW;
2031                 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2032                 access_rs1 = GET_C_RS1S(insn);
2033                 access_imm = GET_C_LW_IMM(insn);
2034                 access_size = 4;
2035                 break;
2036             case OPC_RISC_C_FUNC_FLW_LD:
2037                 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
2038                     xinsn = OPC_RISC_FLW;
2039                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2040                     access_rs1 = GET_C_RS1S(insn);
2041                     access_imm = GET_C_LW_IMM(insn);
2042                     access_size = 4;
2043                 } else { /* C.LD (RV64/RV128) */
2044                     xinsn = OPC_RISC_LD;
2045                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2046                     access_rs1 = GET_C_RS1S(insn);
2047                     access_imm = GET_C_LD_IMM(insn);
2048                     access_size = 8;
2049                 }
2050                 break;
2051             case OPC_RISC_C_FUNC_FSD_SQ:
2052                 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
2053                     xinsn = OPC_RISC_FSD;
2054                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2055                     access_rs1 = GET_C_RS1S(insn);
2056                     access_imm = GET_C_SD_IMM(insn);
2057                     access_size = 8;
2058                 }
2059                 break;
2060             case OPC_RISC_C_FUNC_SW: /* C.SW */
2061                 xinsn = OPC_RISC_SW;
2062                 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2063                 access_rs1 = GET_C_RS1S(insn);
2064                 access_imm = GET_C_SW_IMM(insn);
2065                 access_size = 4;
2066                 break;
2067             case OPC_RISC_C_FUNC_FSW_SD:
2068                 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
2069                     xinsn = OPC_RISC_FSW;
2070                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2071                     access_rs1 = GET_C_RS1S(insn);
2072                     access_imm = GET_C_SW_IMM(insn);
2073                     access_size = 4;
2074                 } else { /* C.SD (RV64/RV128) */
2075                     xinsn = OPC_RISC_SD;
2076                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2077                     access_rs1 = GET_C_RS1S(insn);
2078                     access_imm = GET_C_SD_IMM(insn);
2079                     access_size = 8;
2080                 }
2081                 break;
2082             default:
2083                 break;
2084             }
2085             break;
2086         case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
2087             switch (GET_C_FUNC(insn)) {
2088             case OPC_RISC_C_FUNC_FLDSP_LQSP:
2089                 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
2090                     xinsn = OPC_RISC_FLD;
2091                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2092                     access_rs1 = 2;
2093                     access_imm = GET_C_LDSP_IMM(insn);
2094                     access_size = 8;
2095                 }
2096                 break;
2097             case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
2098                 xinsn = OPC_RISC_LW;
2099                 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2100                 access_rs1 = 2;
2101                 access_imm = GET_C_LWSP_IMM(insn);
2102                 access_size = 4;
2103                 break;
2104             case OPC_RISC_C_FUNC_FLWSP_LDSP:
2105                 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
2106                     xinsn = OPC_RISC_FLW;
2107                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2108                     access_rs1 = 2;
2109                     access_imm = GET_C_LWSP_IMM(insn);
2110                     access_size = 4;
2111                 } else { /* C.LDSP (RV64/RV128) */
2112                     xinsn = OPC_RISC_LD;
2113                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2114                     access_rs1 = 2;
2115                     access_imm = GET_C_LDSP_IMM(insn);
2116                     access_size = 8;
2117                 }
2118                 break;
2119             case OPC_RISC_C_FUNC_FSDSP_SQSP:
2120                 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
2121                     xinsn = OPC_RISC_FSD;
2122                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2123                     access_rs1 = 2;
2124                     access_imm = GET_C_SDSP_IMM(insn);
2125                     access_size = 8;
2126                 }
2127                 break;
2128             case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
2129                 xinsn = OPC_RISC_SW;
2130                 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2131                 access_rs1 = 2;
2132                 access_imm = GET_C_SWSP_IMM(insn);
2133                 access_size = 4;
2134                 break;
2135             case 7:
2136                 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
2137                     xinsn = OPC_RISC_FSW;
2138                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2139                     access_rs1 = 2;
2140                     access_imm = GET_C_SWSP_IMM(insn);
2141                     access_size = 4;
2142                 } else { /* C.SDSP (RV64/RV128) */
2143                     xinsn = OPC_RISC_SD;
2144                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2145                     access_rs1 = 2;
2146                     access_imm = GET_C_SDSP_IMM(insn);
2147                     access_size = 8;
2148                 }
2149                 break;
2150             default:
2151                 break;
2152             }
2153             break;
2154         default:
2155             break;
2156         }
2157 
2158         /*
2159          * Clear bit 1 of the transformed instruction to indicate that the
2160          * original instruction was a 16-bit instruction.
2161          */
2162         xinsn &= ~((target_ulong)0x2);
2163     } else {
2164         /* Transform 32bit (or wider) instructions */
2165         switch (MASK_OP_MAJOR(insn)) {
2166         case OPC_RISC_ATOMIC:
2167             xinsn = insn;
2168             access_rs1 = GET_RS1(insn);
2169             access_size = 1 << GET_FUNCT3(insn);
2170             break;
2171         case OPC_RISC_LOAD:
2172         case OPC_RISC_FP_LOAD:
2173             xinsn = SET_I_IMM(insn, 0);
2174             access_rs1 = GET_RS1(insn);
2175             access_imm = GET_IMM(insn);
2176             access_size = 1 << GET_FUNCT3(insn);
2177             break;
2178         case OPC_RISC_STORE:
2179         case OPC_RISC_FP_STORE:
2180             xinsn = SET_S_IMM(insn, 0);
2181             access_rs1 = GET_RS1(insn);
2182             access_imm = GET_STORE_IMM(insn);
2183             access_size = 1 << GET_FUNCT3(insn);
2184             break;
2185         case OPC_RISC_SYSTEM:
2186             if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
2187                 xinsn = insn;
2188                 access_rs1 = GET_RS1(insn);
2189                 access_size = (GET_FUNCT7(insn) >> 1) & 0x3;
2190                 access_size = 1 << access_size;
2191             }
2192             break;
2193         default:
2194             break;
2195         }
2196     }
2197 
2198     if (access_size) {
2199         xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
2200                                (access_size - 1));
2201     }
2202 
2203     return xinsn;
2204 }
2205 
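     /*
      * Promote a load fault to the matching store/AMO fault; used when the
      * access must always be reported as a store (RISCV_UW2_ALWAYS_STORE_AMO).
      */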
2206 static target_ulong promote_load_fault(target_ulong orig_cause)
2207 {
2208     switch (orig_cause) {
2209     case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2210         return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
2211 
2212     case RISCV_EXCP_LOAD_ACCESS_FAULT:
2213         return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
2214 
2215     case RISCV_EXCP_LOAD_PAGE_FAULT:
2216         return RISCV_EXCP_STORE_PAGE_FAULT;
2217     }
2218 
2219     /* if no promotion, return original cause */
2220     return orig_cause;
2221 }
2222 
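     /*
      * Take a resumable NMI (Smrnmi): save the previous state in the mn*
      * CSRs, disable further NMIs and jump to the RNMI interrupt vector.
      */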
2223 static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
2224 {
2225     env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
2226     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
2227     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
2228     env->mncause = cause;
2229     env->mnepc = env->pc;
2230     env->pc = env->rnmi_irqvec;
2231 
2232     if (cpu_get_fcfien(env)) {
2233         env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
2234     }
2235 
2236     /* Trapping to M mode, virt is disabled */
2237     riscv_cpu_set_mode(env, PRV_M, false);
2238 }
2239 
2240 /*
2241  * Handle Traps
2242  *
2243  * Adapted from Spike's processor_t::take_trap.
2244  *
2245  */
2246 void riscv_cpu_do_interrupt(CPUState *cs)
2247 {
2248     RISCVCPU *cpu = RISCV_CPU(cs);
2249     CPURISCVState *env = &cpu->env;
2250     bool virt = env->virt_enabled;
2251     bool write_gva = false;
2252     bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
2253     bool vsmode_exc;
2254     uint64_t s;
2255     int mode;
2256 
2257     /*
2258      * cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
2259      * wide, so we mask off the MSB and separate into trap type and cause.
2260      */
2261     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
2262     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
2263     uint64_t deleg = async ? env->mideleg : env->medeleg;
2264     bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
2265         !(env->mip & (1ULL << cause));
2266     bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
2267         !(env->mip & (1ULL << cause));
2268     bool smode_double_trap = false;
2269     uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
2270     const bool prev_virt = env->virt_enabled;
2271     const target_ulong prev_priv = env->priv;
2272     target_ulong tval = 0;
2273     target_ulong tinst = 0;
2274     target_ulong htval = 0;
2275     target_ulong mtval2 = 0;
2276     target_ulong src;
2277     int sxlen = 0;
2278     int mxlen = 16 << riscv_cpu_mxl(env);
2279     bool nnmi_excep = false;
2280 
2281     if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
2282         riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
2283                      env->virt_enabled);
2284         return;
2285     }
2286 
2287     if (!async) {
2288         /* set tval to badaddr for traps with address information */
2289         switch (cause) {
2290 #ifdef CONFIG_TCG
2291         case RISCV_EXCP_SEMIHOST:
2292             do_common_semihosting(cs);
2293             env->pc += 4;
2294             return;
2295 #endif
2296         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2297         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
2298         case RISCV_EXCP_LOAD_ADDR_MIS:
2299         case RISCV_EXCP_STORE_AMO_ADDR_MIS:
2300         case RISCV_EXCP_LOAD_ACCESS_FAULT:
2301         case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
2302         case RISCV_EXCP_LOAD_PAGE_FAULT:
2303         case RISCV_EXCP_STORE_PAGE_FAULT:
2304             if (always_storeamo) {
2305                 cause = promote_load_fault(cause);
2306             }
2307             write_gva = env->two_stage_lookup;
2308             tval = env->badaddr;
2309             if (env->two_stage_indirect_lookup) {
2310                 /*
2311                  * special pseudoinstruction for G-stage fault taken while
2312                  * doing VS-stage page table walk.
2313                  */
2314                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2315             } else {
2316                 /*
2317                  * The "Addr. Offset" field in the transformed instruction is
2318                  * non-zero only for misaligned accesses.
2319                  */
2320                 tinst = riscv_transformed_insn(env, env->bins, tval);
2321             }
2322             break;
2323         case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
2324         case RISCV_EXCP_INST_ADDR_MIS:
2325         case RISCV_EXCP_INST_ACCESS_FAULT:
2326         case RISCV_EXCP_INST_PAGE_FAULT:
2327             write_gva = env->two_stage_lookup;
2328             tval = env->badaddr;
2329             if (env->two_stage_indirect_lookup) {
2330                 /*
2331                  * special pseudoinstruction for G-stage fault taken while
2332                  * doing VS-stage page table walk.
2333                  */
2334                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2335             }
2336             break;
2337         case RISCV_EXCP_ILLEGAL_INST:
2338         case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
2339             tval = env->bins;
2340             break;
2341         case RISCV_EXCP_BREAKPOINT:
2342             tval = env->badaddr;
2343             if (cs->watchpoint_hit) {
2344                 tval = cs->watchpoint_hit->hitaddr;
2345                 cs->watchpoint_hit = NULL;
2346             }
2347             break;
2348         case RISCV_EXCP_SW_CHECK:
2349             tval = env->sw_check_code;
2350             break;
2351         default:
2352             break;
2353         }
2354         /* ecall is dispatched as one cause so translate based on mode */
2355         if (cause == RISCV_EXCP_U_ECALL) {
2356             assert(env->priv <= 3);
2357 
2358             if (env->priv == PRV_M) {
2359                 cause = RISCV_EXCP_M_ECALL;
2360             } else if (env->priv == PRV_S && env->virt_enabled) {
2361                 cause = RISCV_EXCP_VS_ECALL;
2362             } else if (env->priv == PRV_S && !env->virt_enabled) {
2363                 cause = RISCV_EXCP_S_ECALL;
2364             } else if (env->priv == PRV_U) {
2365                 cause = RISCV_EXCP_U_ECALL;
2366             }
2367         }
2368     }
2369 
2370     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
2371                      riscv_cpu_get_trap_name(cause, async));
2372 
2373     qemu_log_mask(CPU_LOG_INT,
2374                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
2375                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
2376                   __func__, env->mhartid, async, cause, env->pc, tval,
2377                   riscv_cpu_get_trap_name(cause, async));
2378 
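         /*
          * Pick the target privilege mode: S-mode when the cause is delegated
          * via medeleg/mideleg (or injected via mvien/hvien) and the hart is
          * not in M-mode, otherwise M-mode.
          */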
2379     mode = env->priv <= PRV_S && cause < 64 &&
2380         (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
2381 
2382     vsmode_exc = env->virt_enabled && cause < 64 &&
2383         (((hdeleg >> cause) & 1) || vs_injected);
2384 
2385     /*
2386      * Check double trap condition only if already in S-mode and targeting
2387      * S-mode
2388      */
2389     if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
2390         bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
2391         bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
2392         /* In VS or HS */
2393         if (riscv_has_ext(env, RVH)) {
2394             if (vsmode_exc) {
2395                 /* VS -> VS, use henvcfg instead of menvcfg */
2396                 dte = (env->henvcfg & HENVCFG_DTE) != 0;
2397             } else if (env->virt_enabled) {
2398                 /* VS -> HS, use mstatus_hs */
2399                 sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
2400             }
2401         }
2402         smode_double_trap = dte && sdt;
2403         if (smode_double_trap) {
2404             mode = PRV_M;
2405         }
2406     }
2407 
2408     if (mode == PRV_S) {
2409         /* handle the trap in S-mode */
2410         /* save elp status */
2411         if (cpu_get_fcfien(env)) {
2412             env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
2413         }
2414 
2415         if (riscv_has_ext(env, RVH)) {
2416             if (vsmode_exc) {
2417                 /* Trap to VS mode */
2418                 /*
2419                  * See if we need to adjust the cause: yes if it is a VS-mode
2420                  * interrupt, no if the hypervisor delegated an HS-mode interrupt.
2421                  */
2422                 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
2423                               cause == IRQ_VS_EXT)) {
2424                     cause = cause - 1;
2425                 }
2426                 write_gva = false;
2427             } else if (env->virt_enabled) {
2428                 /* Trap into HS mode, from virt */
2429                 riscv_cpu_swap_hypervisor_regs(env);
2430                 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
2431                                          env->priv);
2432                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
2433 
2434                 htval = env->guest_phys_fault_addr;
2435 
2436                 virt = false;
2437             } else {
2438                 /* Trap into HS mode */
2439                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
2440                 htval = env->guest_phys_fault_addr;
2441             }
2442             env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
2443         }
2444 
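             /*
              * Update sstatus: save SIE into SPIE, record the previous
              * privilege in SPP, disable S-mode interrupts, and set SDT when
              * S-mode double-trap detection is enabled.
              */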
2445         s = env->mstatus;
2446         s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
2447         s = set_field(s, MSTATUS_SPP, env->priv);
2448         s = set_field(s, MSTATUS_SIE, 0);
2449         if (riscv_env_smode_dbltrp_enabled(env, virt)) {
2450             s = set_field(s, MSTATUS_SDT, 1);
2451         }
2452         env->mstatus = s;
2453         sxlen = 16 << riscv_cpu_sxl(env);
2454         env->scause = cause | ((target_ulong)async << (sxlen - 1));
2455         env->sepc = env->pc;
2456         env->stval = tval;
2457         env->htval = htval;
2458         env->htinst = tinst;
2459         env->pc = (env->stvec >> 2 << 2) +
2460                   ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
2461         riscv_cpu_set_mode(env, PRV_S, virt);
2462 
2463         src = env->sepc;
2464     } else {
2465         /*
2466          * If the hart encounters an exception while executing in M-mode
2467          * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
2468          */
2469         nnmi_excep = cpu->cfg.ext_smrnmi &&
2470                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2471                      !async;
2472 
2473         /* handle the trap in M-mode */
2474         /* save elp status */
2475         if (cpu_get_fcfien(env)) {
2476             if (nnmi_excep) {
2477                 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
2478                                           env->elp);
2479             } else {
2480                 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
2481             }
2482         }
2483 
2484         if (riscv_has_ext(env, RVH)) {
2485             if (env->virt_enabled) {
2486                 riscv_cpu_swap_hypervisor_regs(env);
2487             }
2488             env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
2489                                      env->virt_enabled);
2490             if (env->virt_enabled && tval) {
2491                 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
2492             }
2493 
2494             mtval2 = env->guest_phys_fault_addr;
2495 
2496             /* Trapping to M mode, virt is disabled */
2497             virt = false;
2498         }
2499         /*
2500          * If the hart encounters an exception while executing in M-mode,
2501          * with the mnstatus.NMIE bit clear, the program counter is set to
2502          * the RNMI exception trap handler address.
2503          */
2504         nnmi_excep = cpu->cfg.ext_smrnmi &&
2505                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2506                      !async;
2507 
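             /*
              * Update mstatus: save MIE into MPIE, record the previous
              * privilege in MPP and disable M-mode interrupts; with Smdbltrp,
              * a trap taken while MDT is already set is a double trap,
              * otherwise MDT is set for the new trap.
              */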
2508         s = env->mstatus;
2509         s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
2510         s = set_field(s, MSTATUS_MPP, env->priv);
2511         s = set_field(s, MSTATUS_MIE, 0);
2512         if (cpu->cfg.ext_smdbltrp) {
2513             if (env->mstatus & MSTATUS_MDT) {
2514                 assert(env->priv == PRV_M);
2515                 if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
2516                     cpu_abort(CPU(cpu), "M-mode double trap\n");
2517                 } else {
2518                     riscv_do_nmi(env, cause, false);
2519                     return;
2520                 }
2521             }
2522 
2523             s = set_field(s, MSTATUS_MDT, 1);
2524         }
2525         env->mstatus = s;
2526         env->mcause = cause | ((target_ulong)async << (mxlen - 1));
2527         if (smode_double_trap) {
2528             env->mtval2 = env->mcause;
2529             env->mcause = RISCV_EXCP_DOUBLE_TRAP;
2530         } else {
2531             env->mtval2 = mtval2;
2532         }
2533         env->mepc = env->pc;
2534         env->mtval = tval;
2535         env->mtinst = tinst;
2536 
2537         /*
2538          * For RNMI exception, program counter is set to the RNMI exception
2539          * trap handler address.
2540          */
2541         if (nnmi_excep) {
2542             env->pc = env->rnmi_excpvec;
2543         } else {
2544             env->pc = (env->mtvec >> 2 << 2) +
2545                       ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
2546         }
2547         riscv_cpu_set_mode(env, PRV_M, virt);
2548         src = env->mepc;
2549     }
2550 
2551     if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
2552         if (async && cause == IRQ_PMU_OVF) {
2553             riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
2554         } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
2555             riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
2556         }
2557 
2558         riscv_ctr_add_entry(env, src, env->pc,
2559                         async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
2560                         prev_priv, prev_virt);
2561     }
2562 
2563     /*
2564      * Interrupt/exception/trap delivery is an asynchronous event, and per
2565      * the Zicfilp spec the CPU should clear the ELP state. No harm in
2566      * clearing it unconditionally.
2567      */
2568     env->elp = false;
2569 
2570     /*
2571      * NOTE: it is not necessary to yield load reservations here. It is only
2572      * necessary for an SC from "another hart" to cause a load reservation
2573      * to be yielded. Refer to the memory consistency model section of the
2574      * RISC-V ISA Specification.
2575      */
2576 
2577     env->two_stage_lookup = false;
2578     env->two_stage_indirect_lookup = false;
2579 }
2580 
2581 #endif /* !CONFIG_USER_ONLY */
2582