xref: /qemu/target/riscv/cpu_helper.c (revision ffd5a60e9b67e14f7bac7ea29300ea46a944e508)
1 /*
2  * RISC-V CPU helpers for qemu.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/cputlb.h"
27 #include "exec/exec-all.h"
28 #include "exec/page-protection.h"
29 #include "exec/target_page.h"
30 #include "system/memory.h"
31 #include "instmap.h"
32 #include "tcg/tcg-op.h"
33 #include "accel/tcg/cpu-ops.h"
34 #include "trace.h"
35 #include "semihosting/common-semi.h"
36 #include "exec/icount.h"
37 #include "cpu_bits.h"
38 #include "debug.h"
39 #include "pmp.h"
40 
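/*
 * Map the current privilege level, any MPRV/MPV redirection, and SUM into a
 * TCG MMU index.  For example, a data access from S-mode with mstatus.SUM
 * set resolves to MMUIdx_S_SUM, and running with V=1 additionally ORs in
 * MMU_2STAGE_BIT; instruction fetches ignore MPRV and SUM.
 */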
41 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
42 {
43 #ifdef CONFIG_USER_ONLY
44     return 0;
45 #else
46     bool virt = env->virt_enabled;
47     int mode = env->priv;
48 
49     /* All priv -> mmu_idx mappings are here */
50     if (!ifetch) {
51         uint64_t status = env->mstatus;
52 
53         if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
54             mode = get_field(env->mstatus, MSTATUS_MPP);
55             virt = get_field(env->mstatus, MSTATUS_MPV) &&
56                    (mode != PRV_M);
57             if (virt) {
58                 status = env->vsstatus;
59             }
60         }
61         if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
62             mode = MMUIdx_S_SUM;
63         }
64     }
65 
66     return mode | (virt ? MMU_2STAGE_BIT : 0);
67 #endif
68 }
69 
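/*
 * Forward CFI (Zicfilp) enable check for the current privilege level,
 * based on the LPE bit of the applicable envcfg CSR (menvcfg/senvcfg/
 * henvcfg) or mseccfg.MLPE for M-mode.
 */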
70 bool cpu_get_fcfien(CPURISCVState *env)
71 {
72     /* no cfi extension, return false */
73     if (!env_archcpu(env)->cfg.ext_zicfilp) {
74         return false;
75     }
76 
77     switch (env->priv) {
78     case PRV_U:
79         if (riscv_has_ext(env, RVS)) {
80             return env->senvcfg & SENVCFG_LPE;
81         }
82         return env->menvcfg & MENVCFG_LPE;
83 #ifndef CONFIG_USER_ONLY
84     case PRV_S:
85         if (env->virt_enabled) {
86             return env->henvcfg & HENVCFG_LPE;
87         }
88         return env->menvcfg & MENVCFG_LPE;
89     case PRV_M:
90         return env->mseccfg & MSECCFG_MLPE;
91 #endif
92     default:
93         g_assert_not_reached();
94     }
95 }
96 
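/*
 * Backward CFI (Zicfiss shadow stack) enable check for the current
 * privilege level, based on the SSE bit of the applicable envcfg CSR.
 * M-mode never runs with a shadow stack enabled.
 */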
97 bool cpu_get_bcfien(CPURISCVState *env)
98 {
99     /* no cfi extension, return false */
100     if (!env_archcpu(env)->cfg.ext_zicfiss) {
101         return false;
102     }
103 
104     switch (env->priv) {
105     case PRV_U:
106         /*
107          * If S is not implemented then the shadow stack for U can't be turned
108          * on. This is checked in `riscv_cpu_validate_set_extensions`, so there
109          * is no need to check or assert it here.
110          */
111         return env->senvcfg & SENVCFG_SSE;
112 #ifndef CONFIG_USER_ONLY
113     case PRV_S:
114         if (env->virt_enabled) {
115             return env->henvcfg & HENVCFG_SSE;
116         }
117         return env->menvcfg & MENVCFG_SSE;
118     case PRV_M: /* M-mode shadow stack is always off */
119         return false;
120 #endif
121     default:
122         g_assert_not_reached();
123     }
124 }
125 
126 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
127 {
128 #ifdef CONFIG_USER_ONLY
129     return false;
130 #else
131     if (virt) {
132         return (env->henvcfg & HENVCFG_DTE) != 0;
133     } else {
134         return (env->menvcfg & MENVCFG_DTE) != 0;
135     }
136 #endif
137 }
138 
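/*
 * Compute the pc, cs_base and flags used to select and tag a translation
 * block for the current CPU state: privilege, vector configuration, FP and
 * vector status, CFI state, pointer masking and the MMU index all feed into
 * the flags word below.
 */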
139 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
140                           uint64_t *cs_base, uint32_t *pflags)
141 {
142     RISCVCPU *cpu = env_archcpu(env);
143     RISCVExtStatus fs, vs;
144     uint32_t flags = 0;
145     bool pm_signext = riscv_cpu_virt_mem_enabled(env);
146 
147     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
148     *cs_base = 0;
149 
150     if (cpu->cfg.ext_zve32x) {
151         /*
152          * If env->vl equals VLMAX, we can use the generic vector operation
153          * expanders (GVEC) to accelerate the vector operations.
154          * However, since LMUL can be a fractional number, the maximum
155          * vector size that can be operated on might be less than 8 bytes,
156          * which is not supported by GVEC. So we set the vl_eq_vlmax flag to
157          * true only when maxsz >= 8 bytes.
158          */
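        /*
         * Illustrative numbers (not taken from the state below): with
         * VLEN = 128 bits (vlenb = 16), SEW = 32 (vsew = 2) and LMUL = 1/2,
         * vlmax is 128 / 32 / 2 = 2 elements and maxsz = 2 << 2 = 8 bytes,
         * so GVEC can be used.  With LMUL = 1/4 maxsz would only be 4 bytes
         * and vl_eq_vlmax stays false.
         */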
159 
160         /* lmul encoded as in DisasContext::lmul */
161         int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
162         uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
163         uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
164         uint32_t maxsz = vlmax << vsew;
165         bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
166                            (maxsz >= 8);
167         flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
168         flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
169         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
170                            FIELD_EX64(env->vtype, VTYPE, VLMUL));
171         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
172         flags = FIELD_DP32(flags, TB_FLAGS, VTA,
173                            FIELD_EX64(env->vtype, VTYPE, VTA));
174         flags = FIELD_DP32(flags, TB_FLAGS, VMA,
175                            FIELD_EX64(env->vtype, VTYPE, VMA));
176         flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
177     } else {
178         flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
179     }
180 
181     if (cpu_get_fcfien(env)) {
182         /*
183          * For Forward CFI, only the expectation of an lpad at
184          * the start of the block is tracked via env->elp. env->elp
185          * is turned on during jalr translation.
186          */
187         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
188         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
189     }
190 
191     if (cpu_get_bcfien(env)) {
192         flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
193     }
194 
195 #ifdef CONFIG_USER_ONLY
196     fs = EXT_STATUS_DIRTY;
197     vs = EXT_STATUS_DIRTY;
198 #else
199     flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
200 
201     flags |= riscv_env_mmu_index(env, 0);
202     fs = get_field(env->mstatus, MSTATUS_FS);
203     vs = get_field(env->mstatus, MSTATUS_VS);
204 
205     if (env->virt_enabled) {
206         flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
207         /*
208          * Merge DISABLED and !DIRTY states using MIN.
209          * We will set both fields when dirtying.
210          */
211         fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
212         vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
213     }
214 
215     /* With Zfinx, floating point is enabled/disabled by Smstateen. */
216     if (!riscv_has_ext(env, RVF)) {
217         fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
218              ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
219     }
220 
221     if (cpu->cfg.debug && !icount_enabled()) {
222         flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
223     }
224 #endif
225 
226     flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
227     flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
228     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
229     flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
230     flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
231     flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
232 
233     *pflags = flags;
234 }
235 
236 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
237 {
238 #ifndef CONFIG_USER_ONLY
239     int priv_mode = cpu_address_mode(env);
240 
241     if (get_field(env->mstatus, MSTATUS_MPRV) &&
242         get_field(env->mstatus, MSTATUS_MXR)) {
243         return PMM_FIELD_DISABLED;
244     }
245 
246     /* Get current PMM field */
247     switch (priv_mode) {
248     case PRV_M:
249         if (riscv_cpu_cfg(env)->ext_smmpm) {
250             return get_field(env->mseccfg, MSECCFG_PMM);
251         }
252         break;
253     case PRV_S:
254         if (riscv_cpu_cfg(env)->ext_smnpm) {
255             if (get_field(env->mstatus, MSTATUS_MPV)) {
256                 return get_field(env->henvcfg, HENVCFG_PMM);
257             } else {
258                 return get_field(env->menvcfg, MENVCFG_PMM);
259             }
260         }
261         break;
262     case PRV_U:
263         if (riscv_has_ext(env, RVS)) {
264             if (riscv_cpu_cfg(env)->ext_ssnpm) {
265                 return get_field(env->senvcfg, SENVCFG_PMM);
266             }
267         } else {
268             if (riscv_cpu_cfg(env)->ext_smnpm) {
269                 return get_field(env->menvcfg, MENVCFG_PMM);
270             }
271         }
272         break;
273     default:
274         g_assert_not_reached();
275     }
276     return PMM_FIELD_DISABLED;
277 #else
278     return PMM_FIELD_DISABLED;
279 #endif
280 }
281 
282 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
283 {
284 #ifndef CONFIG_USER_ONLY
285     int priv_mode = cpu_address_mode(env);
286 
287     if (priv_mode == PRV_U) {
288         return get_field(env->hstatus, HSTATUS_HUPMM);
289     } else {
290         if (get_field(env->hstatus, HSTATUS_SPVP)) {
291             return get_field(env->henvcfg, HENVCFG_PMM);
292         } else {
293             return get_field(env->senvcfg, SENVCFG_PMM);
294         }
295     }
296 #else
297     return PMM_FIELD_DISABLED;
298 #endif
299 }
300 
301 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
302 {
303 #ifndef CONFIG_USER_ONLY
304     int satp_mode = 0;
305     int priv_mode = cpu_address_mode(env);
306 
307     if (riscv_cpu_mxl(env) == MXL_RV32) {
308         satp_mode = get_field(env->satp, SATP32_MODE);
309     } else {
310         satp_mode = get_field(env->satp, SATP64_MODE);
311     }
312 
313     return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
314 #else
315     return false;
316 #endif
317 }
318 
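/*
 * Translate a PMM field encoding into the number of masked pointer bits,
 * e.g. PMM_FIELD_PMLEN7 means the top 7 bits of an effective address are
 * subject to pointer masking.
 */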
319 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
320 {
321     switch (pmm) {
322     case PMM_FIELD_DISABLED:
323         return 0;
324     case PMM_FIELD_PMLEN7:
325         return 7;
326     case PMM_FIELD_PMLEN16:
327         return 16;
328     default:
329         g_assert_not_reached();
330     }
331 }
332 
333 #ifndef CONFIG_USER_ONLY
334 
335 /*
336  * The HS-mode is allowed to configure priority only for the
337  * following VS-mode local interrupts:
338  *
339  * 0  (Reserved interrupt, reads as zero)
340  * 1  Supervisor software interrupt
341  * 4  (Reserved interrupt, reads as zero)
342  * 5  Supervisor timer interrupt
343  * 8  (Reserved interrupt, reads as zero)
344  * 13 (Reserved interrupt)
345  * 14 "
346  * 15 "
347  * 16 "
348  * 17 "
349  * 18 "
350  * 19 "
351  * 20 "
352  * 21 "
353  * 22 "
354  * 23 "
355  */
356 
357 static const int hviprio_index2irq[] = {
358     0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
359 static const int hviprio_index2rdzero[] = {
360     1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
361 
362 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
363 {
364     if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
365         return -EINVAL;
366     }
367 
368     if (out_irq) {
369         *out_irq = hviprio_index2irq[index];
370     }
371 
372     if (out_rdzero) {
373         *out_rdzero = hviprio_index2rdzero[index];
374     }
375 
376     return 0;
377 }
378 
379 /*
380  * Default priorities of local interrupts are defined in the
381  * RISC-V Advanced Interrupt Architecture specification.
382  *
383  * ----------------------------------------------------------------
384  *  Default  |
385  *  Priority | Major Interrupt Numbers
386  * ----------------------------------------------------------------
387  *  Highest  | 47, 23, 46, 45, 22, 44,
388  *           | 43, 21, 42, 41, 20, 40
389  *           |
390  *           | 11 (0b),  3 (03),  7 (07)
391  *           |  9 (09),  1 (01),  5 (05)
392  *           | 12 (0c)
393  *           | 10 (0a),  2 (02),  6 (06)
394  *           |
395  *           | 39, 19, 38, 37, 18, 36,
396  *  Lowest   | 35, 17, 34, 33, 16, 32
397  * ----------------------------------------------------------------
398  */
399 static const uint8_t default_iprio[64] = {
400     /* Custom interrupts 48 to 63 */
401     [63] = IPRIO_MMAXIPRIO,
402     [62] = IPRIO_MMAXIPRIO,
403     [61] = IPRIO_MMAXIPRIO,
404     [60] = IPRIO_MMAXIPRIO,
405     [59] = IPRIO_MMAXIPRIO,
406     [58] = IPRIO_MMAXIPRIO,
407     [57] = IPRIO_MMAXIPRIO,
408     [56] = IPRIO_MMAXIPRIO,
409     [55] = IPRIO_MMAXIPRIO,
410     [54] = IPRIO_MMAXIPRIO,
411     [53] = IPRIO_MMAXIPRIO,
412     [52] = IPRIO_MMAXIPRIO,
413     [51] = IPRIO_MMAXIPRIO,
414     [50] = IPRIO_MMAXIPRIO,
415     [49] = IPRIO_MMAXIPRIO,
416     [48] = IPRIO_MMAXIPRIO,
417 
418     /* Custom interrupts 24 to 31 */
419     [31] = IPRIO_MMAXIPRIO,
420     [30] = IPRIO_MMAXIPRIO,
421     [29] = IPRIO_MMAXIPRIO,
422     [28] = IPRIO_MMAXIPRIO,
423     [27] = IPRIO_MMAXIPRIO,
424     [26] = IPRIO_MMAXIPRIO,
425     [25] = IPRIO_MMAXIPRIO,
426     [24] = IPRIO_MMAXIPRIO,
427 
428     [47] = IPRIO_DEFAULT_UPPER,
429     [23] = IPRIO_DEFAULT_UPPER + 1,
430     [46] = IPRIO_DEFAULT_UPPER + 2,
431     [45] = IPRIO_DEFAULT_UPPER + 3,
432     [22] = IPRIO_DEFAULT_UPPER + 4,
433     [44] = IPRIO_DEFAULT_UPPER + 5,
434 
435     [43] = IPRIO_DEFAULT_UPPER + 6,
436     [21] = IPRIO_DEFAULT_UPPER + 7,
437     [42] = IPRIO_DEFAULT_UPPER + 8,
438     [41] = IPRIO_DEFAULT_UPPER + 9,
439     [20] = IPRIO_DEFAULT_UPPER + 10,
440     [40] = IPRIO_DEFAULT_UPPER + 11,
441 
442     [11] = IPRIO_DEFAULT_M,
443     [3]  = IPRIO_DEFAULT_M + 1,
444     [7]  = IPRIO_DEFAULT_M + 2,
445 
446     [9]  = IPRIO_DEFAULT_S,
447     [1]  = IPRIO_DEFAULT_S + 1,
448     [5]  = IPRIO_DEFAULT_S + 2,
449 
450     [12] = IPRIO_DEFAULT_SGEXT,
451 
452     [10] = IPRIO_DEFAULT_VS,
453     [2]  = IPRIO_DEFAULT_VS + 1,
454     [6]  = IPRIO_DEFAULT_VS + 2,
455 
456     [39] = IPRIO_DEFAULT_LOWER,
457     [19] = IPRIO_DEFAULT_LOWER + 1,
458     [38] = IPRIO_DEFAULT_LOWER + 2,
459     [37] = IPRIO_DEFAULT_LOWER + 3,
460     [18] = IPRIO_DEFAULT_LOWER + 4,
461     [36] = IPRIO_DEFAULT_LOWER + 5,
462 
463     [35] = IPRIO_DEFAULT_LOWER + 6,
464     [17] = IPRIO_DEFAULT_LOWER + 7,
465     [34] = IPRIO_DEFAULT_LOWER + 8,
466     [33] = IPRIO_DEFAULT_LOWER + 9,
467     [16] = IPRIO_DEFAULT_LOWER + 10,
468     [32] = IPRIO_DEFAULT_LOWER + 11,
469 };
470 
471 uint8_t riscv_cpu_default_priority(int irq)
472 {
473     if (irq < 0 || irq > 63) {
474         return IPRIO_MMAXIPRIO;
475     }
476 
477     return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
478 };
479 
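/*
 * Pick which pending interrupt to take.  Without Smaia/Ssaia this is simply
 * the lowest pending bit; with AIA the iprio[] array and the default
 * priority table above are consulted and the best (numerically lowest)
 * priority wins.
 */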
480 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
481                                     int extirq, unsigned int extirq_def_prio,
482                                     uint64_t pending, uint8_t *iprio)
483 {
484     int irq, best_irq = RISCV_EXCP_NONE;
485     unsigned int prio, best_prio = UINT_MAX;
486 
487     if (!pending) {
488         return RISCV_EXCP_NONE;
489     }
490 
491     irq = ctz64(pending);
492     if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
493                                   riscv_cpu_cfg(env)->ext_ssaia)) {
494         return irq;
495     }
496 
497     pending = pending >> irq;
498     while (pending) {
499         prio = iprio[irq];
500         if (!prio) {
501             if (irq == extirq) {
502                 prio = extirq_def_prio;
503             } else {
504                 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
505                        1 : IPRIO_MMAXIPRIO;
506             }
507         }
508         if ((pending & 0x1) && (prio <= best_prio)) {
509             best_irq = irq;
510             best_prio = prio;
511         }
512         irq++;
513         pending = pending >> 1;
514     }
515 
516     return best_irq;
517 }
518 
519 /*
520  * Doesn't report interrupts inserted using mvip from M-mode firmware or
521  * using hvip bits 13:63 from HS-mode. Those are returned in
522  * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
523  */
524 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
525 {
526     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
527     uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
528     uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
529 
530     return (env->mip | vsgein | vstip) & env->mie;
531 }
532 
533 int riscv_cpu_mirq_pending(CPURISCVState *env)
534 {
535     uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
536                     ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
537 
538     return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
539                                     irqs, env->miprio);
540 }
541 
542 int riscv_cpu_sirq_pending(CPURISCVState *env)
543 {
544     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
545                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
546     uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
547 
548     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
549                                     irqs | irqs_f, env->siprio);
550 }
551 
552 int riscv_cpu_vsirq_pending(CPURISCVState *env)
553 {
554     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
555     uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
556     uint64_t vsbits;
557 
558     /* Bring VS-level bits to correct position */
559     vsbits = irqs & VS_MODE_INTERRUPTS;
560     irqs &= ~VS_MODE_INTERRUPTS;
561     irqs |= vsbits >> 1;
562 
563     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
564                                     (irqs | irqs_f_vs), env->hviprio);
565 }
566 
567 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
568 {
569     uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
570     uint64_t vsbits, irq_delegated;
571     int virq;
572 
573     /* Priority: RNMI > Other interrupt. */
574     if (riscv_cpu_cfg(env)->ext_smrnmi) {
575         /* If mnstatus.NMIE == 0, all interrupts are disabled. */
576         if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
577             return RISCV_EXCP_NONE;
578         }
579 
580         if (env->rnmip) {
581             return ctz64(env->rnmip); /* since non-zero */
582         }
583     }
584 
585     /* Determine interrupt enable state of all privilege modes */
586     if (env->virt_enabled) {
587         mie = 1;
588         hsie = 1;
589         vsie = (env->priv < PRV_S) ||
590                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
591     } else {
592         mie = (env->priv < PRV_M) ||
593               (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
594         hsie = (env->priv < PRV_S) ||
595                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
596         vsie = 0;
597     }
598 
599     /* Determine all pending interrupts */
600     pending = riscv_cpu_all_pending(env);
601 
602     /* Check M-mode interrupts */
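    /*
     * mie/hsie/vsie are 0/1 flags, so "x & -flag" below keeps x unchanged
     * when the flag is 1 and clears it to 0 otherwise.
     */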
603     irqs = pending & ~env->mideleg & -mie;
604     if (irqs) {
605         return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
606                                         irqs, env->miprio);
607     }
608 
609     /* Check for virtual S-mode interrupts. */
610     irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
611 
612     /* Check HS-mode interrupts */
613     irqs =  ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
614     if (irqs) {
615         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
616                                         irqs, env->siprio);
617     }
618 
619     /* Check for virtual VS-mode interrupts. */
620     irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
621 
622     /* Check VS-mode interrupts */
623     irq_delegated = pending & env->mideleg & env->hideleg;
624 
625     /* Bring VS-level bits to correct position */
626     vsbits = irq_delegated & VS_MODE_INTERRUPTS;
627     irq_delegated &= ~VS_MODE_INTERRUPTS;
628     irq_delegated |= vsbits >> 1;
629 
630     irqs = (irq_delegated | irqs_f_vs) & -vsie;
631     if (irqs) {
632         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
633                                         irqs, env->hviprio);
634         if (virq <= 0 || (virq > 12 && virq <= 63)) {
635             return virq;
636         } else {
637             return virq + 1;
638         }
639     }
640 
641     /* Indicate no pending interrupt */
642     return RISCV_EXCP_NONE;
643 }
644 
645 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
646 {
647     uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
648 
649     if (interrupt_request & mask) {
650         RISCVCPU *cpu = RISCV_CPU(cs);
651         CPURISCVState *env = &cpu->env;
652         int interruptno = riscv_cpu_local_irq_pending(env);
653         if (interruptno >= 0) {
654             cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
655             riscv_cpu_do_interrupt(cs);
656             return true;
657         }
658     }
659     return false;
660 }
661 
662 /* Return true if floating point support is currently enabled */
663 bool riscv_cpu_fp_enabled(CPURISCVState *env)
664 {
665     if (env->mstatus & MSTATUS_FS) {
666         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
667             return false;
668         }
669         return true;
670     }
671 
672     return false;
673 }
674 
675 /* Return true if vector support is currently enabled */
676 bool riscv_cpu_vector_enabled(CPURISCVState *env)
677 {
678     if (env->mstatus & MSTATUS_VS) {
679         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
680             return false;
681         }
682         return true;
683     }
684 
685     return false;
686 }
687 
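/*
 * Swap the S-mode CSR state (the swappable sstatus fields, stvec, sscratch,
 * sepc, scause, stval and satp) with the HS/VS shadow copies.  The caller is
 * about to flip the virtualization mode, so after the swap the architectural
 * S-mode CSRs reflect the new V setting.
 */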
688 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
689 {
690     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
691                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
692                             MSTATUS64_UXL | MSTATUS_VS;
693 
694     if (riscv_has_ext(env, RVF)) {
695         mstatus_mask |= MSTATUS_FS;
696     }
697     bool current_virt = env->virt_enabled;
698 
699     /*
700      * If the zicfilp extension is available and henvcfg.LPE = 1,
701      * then include SSTATUS_SPELP in the mstatus swap mask.
702      */
703     if (env_archcpu(env)->cfg.ext_zicfilp &&
704         get_field(env->henvcfg, HENVCFG_LPE)) {
705         mstatus_mask |= SSTATUS_SPELP;
706     }
707 
708     g_assert(riscv_has_ext(env, RVH));
709 
710     if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
711         mstatus_mask |= MSTATUS_SDT;
712     }
713 
714     if (current_virt) {
715         /* Current V=1 and we are about to change to V=0 */
716         env->vsstatus = env->mstatus & mstatus_mask;
717         env->mstatus &= ~mstatus_mask;
718         env->mstatus |= env->mstatus_hs;
719 
720         env->vstvec = env->stvec;
721         env->stvec = env->stvec_hs;
722 
723         env->vsscratch = env->sscratch;
724         env->sscratch = env->sscratch_hs;
725 
726         env->vsepc = env->sepc;
727         env->sepc = env->sepc_hs;
728 
729         env->vscause = env->scause;
730         env->scause = env->scause_hs;
731 
732         env->vstval = env->stval;
733         env->stval = env->stval_hs;
734 
735         env->vsatp = env->satp;
736         env->satp = env->satp_hs;
737     } else {
738         /* Current V=0 and we are about to change to V=1 */
739         env->mstatus_hs = env->mstatus & mstatus_mask;
740         env->mstatus &= ~mstatus_mask;
741         env->mstatus |= env->vsstatus;
742 
743         env->stvec_hs = env->stvec;
744         env->stvec = env->vstvec;
745 
746         env->sscratch_hs = env->sscratch;
747         env->sscratch = env->vsscratch;
748 
749         env->sepc_hs = env->sepc;
750         env->sepc = env->vsepc;
751 
752         env->scause_hs = env->scause;
753         env->scause = env->vscause;
754 
755         env->stval_hs = env->stval;
756         env->stval = env->vstval;
757 
758         env->satp_hs = env->satp;
759         env->satp = env->vsatp;
760     }
761 }
762 
763 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
764 {
765     if (!riscv_has_ext(env, RVH)) {
766         return 0;
767     }
768 
769     return env->geilen;
770 }
771 
772 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
773 {
774     if (!riscv_has_ext(env, RVH)) {
775         return;
776     }
777 
778     if (geilen > (TARGET_LONG_BITS - 1)) {
779         return;
780     }
781 
782     env->geilen = geilen;
783 }
784 
785 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
786 {
787     CPURISCVState *env = &cpu->env;
788     CPUState *cs = CPU(cpu);
789     bool release_lock = false;
790 
791     if (!bql_locked()) {
792         release_lock = true;
793         bql_lock();
794     }
795 
796     if (level) {
797         env->rnmip |= 1 << irq;
798         cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
799     } else {
800         env->rnmip &= ~(1 << irq);
801         cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
802     }
803 
804     if (release_lock) {
805         bql_unlock();
806     }
807 }
808 
809 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
810 {
811     CPURISCVState *env = &cpu->env;
812     if (env->miclaim & interrupts) {
813         return -1;
814     } else {
815         env->miclaim |= interrupts;
816         return 0;
817     }
818 }
819 
820 void riscv_cpu_interrupt(CPURISCVState *env)
821 {
822     uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
823     CPUState *cs = env_cpu(env);
824 
825     BQL_LOCK_GUARD();
826 
827     if (env->virt_enabled) {
828         gein = get_field(env->hstatus, HSTATUS_VGEIN);
829         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
830         irqf = env->hvien & env->hvip & env->vsie;
831     } else {
832         irqf = env->mvien & env->mvip & env->sie;
833     }
834 
835     vstip = env->vstime_irq ? MIP_VSTIP : 0;
836 
837     if (env->mip | vsgein | vstip | irqf) {
838         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
839     } else {
840         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
841     }
842 }
843 
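/*
 * Update the masked bits of mip to the given value under the BQL, then
 * re-evaluate the interrupt lines.  The previous mip value is returned to
 * the caller.
 */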
844 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
845 {
846     uint64_t old = env->mip;
847 
848     /* No need to update mip for VSTIP */
849     mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
850 
851     BQL_LOCK_GUARD();
852 
853     env->mip = (env->mip & ~mask) | (value & mask);
854 
855     riscv_cpu_interrupt(env);
856 
857     return old;
858 }
859 
860 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
861                              void *arg)
862 {
863     env->rdtime_fn = fn;
864     env->rdtime_fn_arg = arg;
865 }
866 
867 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
868                                    int (*rmw_fn)(void *arg,
869                                                  target_ulong reg,
870                                                  target_ulong *val,
871                                                  target_ulong new_val,
872                                                  target_ulong write_mask),
873                                    void *rmw_fn_arg)
874 {
875     if (priv <= PRV_M) {
876         env->aia_ireg_rmw_fn[priv] = rmw_fn;
877         env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
878     }
879 }
880 
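/*
 * Freeze the CTR buffer (set sctrstatus.FROZEN) if the active control
 * register (vsctrctl when virtualized, mctrctl otherwise) has any of the
 * requested freeze causes (BPFRZ/LCOFIFRZ) enabled.
 */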
881 static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
882                              bool virt)
883 {
884     uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
885 
886     assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
887 
888     if (ctl & freeze_mask) {
889         env->sctrstatus |= SCTRSTATUS_FROZEN;
890     }
891 }
892 
893 void riscv_ctr_clear(CPURISCVState *env)
894 {
895     memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
896     memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
897     memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
898 }
899 
900 static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
901 {
902     switch (priv) {
903     case PRV_M:
904         return MCTRCTL_M;
905     case PRV_S:
906         if (virt) {
907             return XCTRCTL_S;
908         }
909         return XCTRCTL_S;
910     case PRV_U:
911         if (virt) {
912             return XCTRCTL_U;
913         }
914         return XCTRCTL_U;
915     }
916 
917     g_assert_not_reached();
918 }
919 
920 static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
921                                       bool virt)
922 {
923     switch (priv) {
924     case PRV_M:
925         return env->mctrctl;
926     case PRV_S:
927     case PRV_U:
928         if (virt) {
929             return env->vsctrctl;
930         }
931         return env->mctrctl;
932     }
933 
934     g_assert_not_reached();
935 }
936 
937 /*
938  * This function assumes that the source privilege and target privilege are
939  * not the same and that the source privilege is less than the target
940  * privilege. This includes the virtual state as well.
941  */
942 static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
943                                 bool src_virt)
944 {
945     target_long tgt_prv = env->priv;
946     bool res = true;
947 
948     /*
949      * VS and U mode are the same in terms of xTE bits required to record an
950      * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
951      * Requirements. This changes VS to U to simplify the logic a bit.
952      */
953     if (src_virt && src_prv == PRV_S) {
954         src_prv = PRV_U;
955     } else if (env->virt_enabled && tgt_prv == PRV_S) {
956         tgt_prv = PRV_U;
957     }
958 
959     /* VU mode is an outlier here. */
960     if (src_virt && src_prv == PRV_U) {
961         res &= !!(env->vsctrctl & XCTRCTL_STE);
962     }
963 
964     switch (src_prv) {
965     case PRV_U:
966         if (tgt_prv == PRV_U) {
967             break;
968         }
969         res &= !!(env->mctrctl & XCTRCTL_STE);
970         /* fall-through */
971     case PRV_S:
972         if (tgt_prv == PRV_S) {
973             break;
974         }
975         res &= !!(env->mctrctl & MCTRCTL_MTE);
976         /* fall-through */
977     case PRV_M:
978         break;
979     }
980 
981     return res;
982 }
983 
984 /*
985  * Special cases for traps and trap returns:
986  *
987  * 1- Traps, and trap returns, between enabled modes are recorded as normal.
988  * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
989  * enabled mode back to an inhibited mode, are partially recorded.  In such
990  * cases, the PC from the inhibited mode (source PC for traps, and target PC
991  * for trap returns) is 0.
992  *
993  * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
994  * Traps from an enabled mode to an inhibited mode, known as external traps,
995  * receive special handling.
996  * By default external traps are not recorded, but a handshake mechanism exists
997  * to allow partial recording.  Software running in the target mode of the trap
998  * can opt-in to allowing CTR to record traps into that mode even when the mode
999  * is inhibited.  The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
1000  * respectively, to opt-in. When an external trap occurs and xTE=1, where x is
1001  * the target privilege mode of the trap, CTR records the trap. In such cases,
1002  * the target PC is 0.
1003  */
1004 /*
1005  * CTR arrays are implemented as circular buffers and the new entry is stored
1006  * at sctrstatus.WRPTR, but they are presented to software as moving circular
1007  * buffers. This means software gets the illusion that whenever a new entry is
1008  * added, the whole buffer moves by one place: the new entry appears at index 0
1009  * and the older ones follow it.
1010  *
1011  * Depth = 16.
1012  *
1013  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1014  * WRPTR                                   W
1015  * entry   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8
1016  *
1017  * When a new entry is added:
1018  * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1019  * WRPTR                                       W
1020  * entry   8   7   6   5   4   3   2   1   0   F   E   D   C   B   A   9
1021  *
1022  * entry here denotes the logical entry number that software can access
1023  * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
1024  * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
1025  * buffer[7]. Here is how we convert entry to buffer idx.
1026  *
1027  *    entry = isel - CTR_ENTRIES_FIRST;
1028  *    idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
1029  */
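/*
 * Worked example for the first diagram above (illustrative only): with
 * WRPTR = 8 and depth = 16, reading xiselect 0x203 gives
 * entry = 0x203 - CTR_ENTRIES_FIRST = 3 and idx = (8 - 3 - 1) & 15 = 4,
 * i.e. buffer[4], which indeed holds logical entry 3 in that diagram.
 */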
1030 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
1031     enum CTRType type, target_ulong src_priv, bool src_virt)
1032 {
1033     bool tgt_virt = env->virt_enabled;
1034     uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
1035     uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
1036     uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
1037     uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
1038     uint64_t depth, head;
1039     bool ext_trap = false;
1040 
1041     /*
1042      * Return immediately if both target and src recording are disabled or if
1043      * CTR is in frozen state.
1044      */
1045     if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
1046         env->sctrstatus & SCTRSTATUS_FROZEN) {
1047         return;
1048     }
1049 
1050     /*
1051      * With RAS emulation enabled, only allow indirect calls, direct calls,
1052      * function returns and co-routine swap types.
1053      */
1054     if (tgt_ctrl & XCTRCTL_RASEMU &&
1055         type != CTRDATA_TYPE_INDIRECT_CALL &&
1056         type != CTRDATA_TYPE_DIRECT_CALL &&
1057         type != CTRDATA_TYPE_RETURN &&
1058         type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1059         return;
1060     }
1061 
1062     if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
1063         /* Case 2 for traps. */
1064         if (!(src_ctrl & src_mask)) {
1065             src = 0;
1066         } else if (!(tgt_ctrl & tgt_mask)) {
1067             /* Check if target priv-mode has allowed external trap recording. */
1068             if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
1069                 return;
1070             }
1071 
1072             ext_trap = true;
1073             dst = 0;
1074         }
1075     } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
1076         /*
1077          * Case 3 for trap returns.  Trap returns from inhibited mode are not
1078          * recorded.
1079          */
1080         if (!(src_ctrl & src_mask)) {
1081             return;
1082         }
1083 
1084         /* Case 2 for trap returns. */
1085         if (!(tgt_ctrl & tgt_mask)) {
1086             dst = 0;
1087         }
1088     }
1089 
1090     /* Ignore filters in case of RASEMU mode or External trap. */
1091     if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
1092         /*
1093          * Check if the specific type is inhibited. Not taken branch filter is
1094          * an enable bit and needs to be checked separately.
1095          */
1096         bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
1097         if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
1098             (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
1099             return;
1100         }
1101     }
1102 
1103     head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
1104 
1105     depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
1106     if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
1107         head = (head - 1) & (depth - 1);
1108 
1109         env->ctr_src[head] &= ~CTRSOURCE_VALID;
1110         env->sctrstatus =
1111             set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1112         return;
1113     }
1114 
1115     /* In case of Co-routine SWAP we overwrite latest entry. */
1116     /* In case of Co-routine SWAP we overwrite the latest entry. */
1117         head = (head - 1) & (depth - 1);
1118     }
1119 
1120     env->ctr_src[head] = src | CTRSOURCE_VALID;
1121     env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
1122     env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
1123 
1124     head = (head + 1) & (depth - 1);
1125 
1126     env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1127 }
1128 
1129 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
1130 {
1131     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
1132 
1133     if (newpriv != env->priv || env->virt_enabled != virt_en) {
1134         if (icount_enabled()) {
1135             riscv_itrigger_update_priv(env);
1136         }
1137 
1138         riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
1139     }
1140 
1141     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
1142     env->priv = newpriv;
1143     env->xl = cpu_recompute_xl(env);
1144 
1145     /*
1146      * Clear the load reservation - otherwise a reservation placed in one
1147      * context/process can be used by another, resulting in an SC succeeding
1148      * incorrectly. Version 2.2 of the ISA specification explicitly requires
1149      * this behaviour, while later revisions say that the kernel "should" use
1150      * an SC instruction to force the yielding of a load reservation on a
1151      * preemptive context switch. As a result, do both.
1152      */
1153     env->load_res = -1;
1154 
1155     if (riscv_has_ext(env, RVH)) {
1156         /* Flush the TLB on all virt mode changes. */
1157         if (env->virt_enabled != virt_en) {
1158             tlb_flush(env_cpu(env));
1159         }
1160 
1161         env->virt_enabled = virt_en;
1162         if (virt_en) {
1163             /*
1164              * The guest external interrupts from an interrupt controller are
1165              * delivered only when the Guest/VM is running (i.e. V=1). This
1166              * means any guest external interrupt which is triggered while the
1167              * Guest/VM is not running (i.e. V=0) will be missed by QEMU,
1168              * resulting in a guest with sluggish response to serial console
1169              * input and other I/O events.
1170              *
1171              * To solve this, we check and inject interrupt after setting V=1.
1172              */
1173             riscv_cpu_update_mip(env, 0, 0);
1174         }
1175     }
1176 }
1177 
1178 /*
1179  * get_physical_address_pmp - check PMP permission for this physical address
1180  *
1181  * Match the PMP region and check permission for this physical address and it's
1182  * Match the PMP region and check permission for this physical address and its
1183  * TLB page. Returns 0 if the permission check was successful.
1184  * @env: CPURISCVState
1185  * @prot: The returned protection attributes
1186  * @addr: The physical address to be checked permission
1187  * @addr: The physical address whose permission is to be checked
1188  * @mode: Indicates current privilege level.
1189  */
1190 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
1191                                     int size, MMUAccessType access_type,
1192                                     int mode)
1193 {
1194     pmp_priv_t pmp_priv;
1195     bool pmp_has_privs;
1196 
1197     if (!riscv_cpu_cfg(env)->pmp) {
1198         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1199         return TRANSLATE_SUCCESS;
1200     }
1201 
1202     pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
1203                                        &pmp_priv, mode);
1204     if (!pmp_has_privs) {
1205         *prot = 0;
1206         return TRANSLATE_PMP_FAIL;
1207     }
1208 
1209     *prot = pmp_priv_to_page_prot(pmp_priv);
1210 
1211     return TRANSLATE_SUCCESS;
1212 }
1213 
1214 /* Returns 'true' if a svukte address check is needed */
1215 static bool do_svukte_check(CPURISCVState *env, bool first_stage,
1216                              int mode, bool virt)
1217 {
1218     /* Svukte extension depends on Sv39. */
1219     if (!env_archcpu(env)->cfg.ext_svukte ||
1220         !first_stage ||
1221         VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
1222         return false;
1223     }
1224 
1225     /*
1226      * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
1227      * executing HLV/HLVX/HSV in U-mode.
1228      * For other cases, check senvcfg.UKTE.
1229      */
1230     if (env->priv == PRV_U && !env->virt_enabled && virt) {
1231         if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
1232             return false;
1233         }
1234     } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
1235         return false;
1236     }
1237 
1238     /*
1239      * Svukte extension is qualified only in U or VU-mode.
1240      *
1241      * Effective mode can be switched to U or VU-mode by:
1242      *   - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
1243      *   - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
1244      *   - U-mode.
1245      *   - VU-mode.
1246      *   - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
1247      */
1248     if (mode != PRV_U) {
1249         return false;
1250     }
1251 
1252     return true;
1253 }
1254 
1255 static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
1256 {
1257     /* svukte extension excludes RV32 */
1258     uint32_t sxlen = 32 * riscv_cpu_sxl(env);
1259     uint64_t high_bit = addr & (1UL << (sxlen - 1));
1260     return !high_bit;
1261 }
1262 
1263 /*
1264  * get_physical_address - get the physical address for this virtual address
1265  *
1266  * Do a page table walk to obtain the physical address corresponding to a
1267  * virtual address. Returns 0 if the translation was successful
1268  *
1269  * Adapted from Spike's mmu_t::translate and mmu_t::walk
1270  *
1271  * @env: CPURISCVState
1272  * @physical: This will be set to the calculated physical address
1273  * @prot: The returned protection attributes
1274  * @addr: The virtual address or guest physical address to be translated
1275  * @fault_pte_addr: If not NULL, this will be set to fault pte address
1276  *                  when an error occurs on pte address translation.
1277  *                  This will already be shifted to match htval.
1278  * @access_type: The type of MMU access
1279  * @mmu_idx: Indicates current privilege level
1280  * @first_stage: Are we in first stage translation?
1281  *               Second stage is used for hypervisor guest translation
1282  * @two_stage: Are we going to perform two stage translation
1283  * @is_debug: Is this access from a debugger or the monitor?
1284  */
1285 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1286                                 int *ret_prot, vaddr addr,
1287                                 target_ulong *fault_pte_addr,
1288                                 int access_type, int mmu_idx,
1289                                 bool first_stage, bool two_stage,
1290                                 bool is_debug, bool is_probe)
1291 {
1292     /*
1293      * NOTE: the env->pc value visible here will not be
1294      * correct, but the value visible to the exception handler
1295      * (riscv_cpu_do_interrupt) is correct
1296      */
1297     MemTxResult res;
1298     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1299     int mode = mmuidx_priv(mmu_idx);
1300     bool virt = mmuidx_2stage(mmu_idx);
1301     bool use_background = false;
1302     hwaddr ppn;
1303     int napot_bits = 0;
1304     target_ulong napot_mask;
1305     bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1306     bool sstack_page = false;
1307 
1308     if (do_svukte_check(env, first_stage, mode, virt) &&
1309         !check_svukte_addr(env, addr)) {
1310         return TRANSLATE_FAIL;
1311     }
1312 
1313     /*
1314      * Check if we should use the background registers for the two
1315      * stage translation. We don't need to check if we actually need
1316      * two stage translation as that happened before this function
1317      * was called. Background registers will be used if the guest has
1318      * forced a two stage translation to be on (in HS or M mode).
1319      */
1320     if (!env->virt_enabled && two_stage) {
1321         use_background = true;
1322     }
1323 
1324     if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1325         *physical = addr;
1326         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1327         return TRANSLATE_SUCCESS;
1328     }
1329 
1330     *ret_prot = 0;
1331 
1332     hwaddr base;
1333     int levels, ptidxbits, ptesize, vm, widened;
1334 
1335     if (first_stage == true) {
1336         if (use_background) {
1337             if (riscv_cpu_mxl(env) == MXL_RV32) {
1338                 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1339                 vm = get_field(env->vsatp, SATP32_MODE);
1340             } else {
1341                 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1342                 vm = get_field(env->vsatp, SATP64_MODE);
1343             }
1344         } else {
1345             if (riscv_cpu_mxl(env) == MXL_RV32) {
1346                 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1347                 vm = get_field(env->satp, SATP32_MODE);
1348             } else {
1349                 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1350                 vm = get_field(env->satp, SATP64_MODE);
1351             }
1352         }
1353         widened = 0;
1354     } else {
1355         if (riscv_cpu_mxl(env) == MXL_RV32) {
1356             base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1357             vm = get_field(env->hgatp, SATP32_MODE);
1358         } else {
1359             base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1360             vm = get_field(env->hgatp, SATP64_MODE);
1361         }
1362         widened = 2;
1363     }
1364 
1365     switch (vm) {
1366     case VM_1_10_SV32:
1367       levels = 2; ptidxbits = 10; ptesize = 4; break;
1368     case VM_1_10_SV39:
1369       levels = 3; ptidxbits = 9; ptesize = 8; break;
1370     case VM_1_10_SV48:
1371       levels = 4; ptidxbits = 9; ptesize = 8; break;
1372     case VM_1_10_SV57:
1373       levels = 5; ptidxbits = 9; ptesize = 8; break;
1374     case VM_1_10_MBARE:
1375         *physical = addr;
1376         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1377         return TRANSLATE_SUCCESS;
1378     default:
1379       g_assert_not_reached();
1380     }
1381 
1382     CPUState *cs = env_cpu(env);
1383     int va_bits = PGSHIFT + levels * ptidxbits + widened;
1384     int sxlen = 16 << riscv_cpu_sxl(env);
1385     int sxlen_bytes = sxlen / 8;
1386 
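    /*
     * First-stage virtual addresses must be sign-extended from bit
     * (va_bits - 1): for Sv39 (levels = 3, ptidxbits = 9, widened = 0)
     * va_bits is 39, so bits sxlen-1..38 must all equal bit 38.  For the
     * G stage (other than Sv32x4) the guest-physical address simply has to
     * fit within va_bits (which includes the two "widened" bits).
     */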
1387     if (first_stage == true) {
1388         target_ulong mask, masked_msbs;
1389 
1390         if (sxlen > (va_bits - 1)) {
1391             mask = (1L << (sxlen - (va_bits - 1))) - 1;
1392         } else {
1393             mask = 0;
1394         }
1395         masked_msbs = (addr >> (va_bits - 1)) & mask;
1396 
1397         if (masked_msbs != 0 && masked_msbs != mask) {
1398             return TRANSLATE_FAIL;
1399         }
1400     } else {
1401         if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1402             return TRANSLATE_FAIL;
1403         }
1404     }
1405 
1406     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1407     bool svade = riscv_cpu_cfg(env)->ext_svade;
1408     bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1409     bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1410 
1411     if (first_stage && two_stage && env->virt_enabled) {
1412         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1413         adue = adue && (env->henvcfg & HENVCFG_ADUE);
1414     }
1415 
1416     int ptshift = (levels - 1) * ptidxbits;
1417     target_ulong pte;
1418     hwaddr pte_addr;
1419     int i;
1420 
1421  restart:
1422     for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1423         target_ulong idx;
1424         if (i == 0) {
1425             idx = (addr >> (PGSHIFT + ptshift)) &
1426                            ((1 << (ptidxbits + widened)) - 1);
1427         } else {
1428             idx = (addr >> (PGSHIFT + ptshift)) &
1429                            ((1 << ptidxbits) - 1);
1430         }
1431 
1432         /* check that physical address of PTE is legal */
1433 
1434         if (two_stage && first_stage) {
1435             int vbase_prot;
1436             hwaddr vbase;
1437 
1438             /* Do the second stage translation on the base PTE address. */
1439             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1440                                                  base, NULL, MMU_DATA_LOAD,
1441                                                  MMUIdx_U, false, true,
1442                                                  is_debug, false);
1443 
1444             if (vbase_ret != TRANSLATE_SUCCESS) {
1445                 if (fault_pte_addr) {
1446                     *fault_pte_addr = (base + idx * ptesize) >> 2;
1447                 }
1448                 return TRANSLATE_G_STAGE_FAIL;
1449             }
1450 
1451             pte_addr = vbase + idx * ptesize;
1452         } else {
1453             pte_addr = base + idx * ptesize;
1454         }
1455 
1456         int pmp_prot;
1457         int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1458                                                sxlen_bytes,
1459                                                MMU_DATA_LOAD, PRV_S);
1460         if (pmp_ret != TRANSLATE_SUCCESS) {
1461             return TRANSLATE_PMP_FAIL;
1462         }
1463 
1464         if (riscv_cpu_mxl(env) == MXL_RV32) {
1465             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1466         } else {
1467             pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1468         }
1469 
1470         if (res != MEMTX_OK) {
1471             return TRANSLATE_FAIL;
1472         }
1473 
1474         if (riscv_cpu_sxl(env) == MXL_RV32) {
1475             ppn = pte >> PTE_PPN_SHIFT;
1476         } else {
1477             if (pte & PTE_RESERVED) {
1478                 qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
1479                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1480                               __func__, pte_addr, pte);
1481                 return TRANSLATE_FAIL;
1482             }
1483 
1484             if (!pbmte && (pte & PTE_PBMT)) {
1485                 /* Reserved without Svpbmt. */
1486                 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1487                               "and Svpbmt extension is disabled: "
1488                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1489                               __func__, pte_addr, pte);
1490                 return TRANSLATE_FAIL;
1491             }
1492 
1493             if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1494                 /* Reserved without Svnapot extension */
1495                 qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
1496                               "and Svnapot extension is disabled: "
1497                               "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1498                               __func__, pte_addr, pte);
1499                 return TRANSLATE_FAIL;
1500             }
1501 
1502             ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1503         }
1504 
1505         if (!(pte & PTE_V)) {
1506             /* Invalid PTE */
1507             return TRANSLATE_FAIL;
1508         }
1509 
1510         if (pte & (PTE_R | PTE_W | PTE_X)) {
1511             goto leaf;
1512         }
1513 
1514         if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1515             /* D, A, and U bits are reserved in non-leaf/inner PTEs */
1516             qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
1517                           "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1518                           __func__, pte_addr, pte);
1519             return TRANSLATE_FAIL;
1520         }
1521         /* Inner PTE, continue walking */
1522         base = ppn << PGSHIFT;
1523     }
1524 
1525     /* No leaf pte at any translation level. */
1526     return TRANSLATE_FAIL;
1527 
1528  leaf:
1529     if (ppn & ((1ULL << ptshift) - 1)) {
1530         /* Misaligned PPN */
1531         qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
1532                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1533                       __func__, pte_addr, pte);
1534         return TRANSLATE_FAIL;
1535     }
1536     if (!pbmte && (pte & PTE_PBMT)) {
1537         /* Reserved without Svpbmt. */
1538         qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1539                       "and Svpbmt extension is disabled: "
1540                       "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1541                       __func__, pte_addr, pte);
1542         return TRANSLATE_FAIL;
1543     }
1544 
1545     target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1546     /* Check for reserved combinations of RWX flags. */
1547     switch (rwx) {
1548     case PTE_W | PTE_X:
1549         return TRANSLATE_FAIL;
1550     case PTE_W:
1551         /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
1552         if (cpu_get_bcfien(env) && first_stage) {
1553             sstack_page = true;
1554             /*
1555              * If this is a shadow stack MMU index, both read and write are
1556              * allowed; otherwise, if this is not a probe, only read is allowed.
1557              */
1558             rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 :  PTE_R);
1559             break;
1560         }
1561         return TRANSLATE_FAIL;
1562     case PTE_R:
1563         /*
1564          * No matter what the `access_type` is, shadow stack accesses to
1565          * read-only memory are always store page faults. During unwind, loads
1566          * will be promoted to store faults.
1567          */
1568         if (is_sstack_idx) {
1569             return TRANSLATE_FAIL;
1570         }
1571         break;
1572     }
1573 
1574     int prot = 0;
1575     if (rwx & PTE_R) {
1576         prot |= PAGE_READ;
1577     }
1578     if (rwx & PTE_W) {
1579         prot |= PAGE_WRITE;
1580     }
1581     if (rwx & PTE_X) {
1582         bool mxr = false;
1583 
1584         /*
1585          * Use mstatus for first stage or for the second stage without
1586          * virt_enabled (MPRV+MPV)
1587          */
1588         if (first_stage || !env->virt_enabled) {
1589             mxr = get_field(env->mstatus, MSTATUS_MXR);
1590         }
1591 
1592         /* MPRV+MPV case, check VSSTATUS */
1593         if (first_stage && two_stage && !env->virt_enabled) {
1594             mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1595         }
1596 
1597         /*
1598          * Setting MXR at HS-level overrides both VS-stage and G-stage
1599          * execute-only permissions
1600          */
1601         if (env->virt_enabled) {
1602             mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1603         }
1604 
1605         if (mxr) {
1606             prot |= PAGE_READ;
1607         }
1608         prot |= PAGE_EXEC;
1609     }
1610 
1611     if (pte & PTE_U) {
1612         if (mode != PRV_U) {
1613             if (!mmuidx_sum(mmu_idx)) {
1614                 return TRANSLATE_FAIL;
1615             }
1616             /* SUM allows only read+write, not execute. */
1617             prot &= PAGE_READ | PAGE_WRITE;
1618         }
1619     } else if (mode != PRV_S) {
1620         /* PTE without U bit is a supervisor page: fault unless in S mode */
1621         return TRANSLATE_FAIL;
1622     }
1623 
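    /*
     * MMU_DATA_LOAD, MMU_DATA_STORE and MMU_INST_FETCH are 0, 1 and 2,
     * matching the bit positions of PAGE_READ, PAGE_WRITE and PAGE_EXEC,
     * so shifting prot by the access type selects the permission bit
     * required for this access.
     */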
1624     if (!((prot >> access_type) & 1)) {
1625         /*
1626          * Access check failed. Access-check failures for shadow stack
1627          * pages are reported as access faults.
1628          */
1629         return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1630     }
1631 
1632     target_ulong updated_pte = pte;
1633 
1634     /*
1635      * If ADUE is enabled, set accessed and dirty bits.
1636      * Otherwise raise an exception if necessary.
1637      */
1638     if (adue) {
1639         updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1640     } else if (!(pte & PTE_A) ||
1641                (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1642         return TRANSLATE_FAIL;
1643     }
1644 
1645     /* Page table updates need to be atomic with MTTCG enabled */
1646     if (updated_pte != pte && !is_debug) {
1647         if (!adue) {
1648             return TRANSLATE_FAIL;
1649         }
1650 
1651         /*
1652          * - if accessed or dirty bits need updating, and the PTE is
1653          *   in RAM, then we do so atomically with a compare and swap.
1654          * - if the PTE is in IO space or ROM, then it can't be updated
1655          *   and we return TRANSLATE_FAIL.
1656          * - if the PTE changed by the time we went to update it, then
1657          *   it is no longer valid and we must re-walk the page table.
1658          */
1659         MemoryRegion *mr;
1660         hwaddr l = sxlen_bytes, addr1;
1661         mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1662                                      false, MEMTXATTRS_UNSPECIFIED);
1663         if (memory_region_is_ram(mr)) {
1664             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1665             target_ulong old_pte;
1666             if (riscv_cpu_sxl(env) == MXL_RV32) {
1667                 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
1668             } else {
1669                 old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
1670             }
1671             if (old_pte != pte) {
1672                 goto restart;
1673             }
1674             pte = updated_pte;
1675         } else {
1676             /*
1677              * Misconfigured PTE in ROM (A/D bits are not already set) or
1678              * PTE is in IO space and can't be updated atomically.
1679              */
1680             return TRANSLATE_FAIL;
1681         }
1682     }
1683 
1684     /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1685     target_ulong vpn = addr >> PGSHIFT;
1686 
1687     if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1688         napot_bits = ctzl(ppn) + 1;
1689         if ((i != (levels - 1)) || (napot_bits != 4)) {
1690             return TRANSLATE_FAIL;
1691         }
1692     }
1693 
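    /*
     * For a regular leaf, napot_bits is 0, so the PPN is used as-is and
     * only the VPN bits below ptshift (superpages) come from the virtual
     * address. For a 64 KiB Svnapot mapping, the low 4 PPN bits are
     * replaced by the corresponding VPN bits as well.
     */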
1694     napot_mask = (1 << napot_bits) - 1;
1695     *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1696                   (vpn & (((target_ulong)1 << ptshift) - 1))
1697                  ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1698 
1699     /*
1700      * Remove write permission unless this is a store, or the page is
1701      * already dirty, so that we TLB miss on later writes to update
1702      * the dirty bit.
1703      */
1704     if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1705         prot &= ~PAGE_WRITE;
1706     }
1707     *ret_prot = prot;
1708 
1709     return TRANSLATE_SUCCESS;
1710 }
1711 
1712 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1713                                 MMUAccessType access_type, bool pmp_violation,
1714                                 bool first_stage, bool two_stage,
1715                                 bool two_stage_indirect)
1716 {
1717     CPUState *cs = env_cpu(env);
1718 
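    /*
     * PMP violations are reported as access faults, failures in the
     * G-stage of a two-stage translation as guest-page faults, and
     * everything else as regular page faults.
     */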
1719     switch (access_type) {
1720     case MMU_INST_FETCH:
1721         if (pmp_violation) {
1722             cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1723         } else if (env->virt_enabled && !first_stage) {
1724             cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1725         } else {
1726             cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1727         }
1728         break;
1729     case MMU_DATA_LOAD:
1730         if (pmp_violation) {
1731             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1732         } else if (two_stage && !first_stage) {
1733             cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1734         } else {
1735             cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1736         }
1737         break;
1738     case MMU_DATA_STORE:
1739         if (pmp_violation) {
1740             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1741         } else if (two_stage && !first_stage) {
1742             cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1743         } else {
1744             cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1745         }
1746         break;
1747     default:
1748         g_assert_not_reached();
1749     }
1750     env->badaddr = address;
1751     env->two_stage_lookup = two_stage;
1752     env->two_stage_indirect_lookup = two_stage_indirect;
1753 }
1754 
1755 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1756 {
1757     RISCVCPU *cpu = RISCV_CPU(cs);
1758     CPURISCVState *env = &cpu->env;
1759     hwaddr phys_addr;
1760     int prot;
1761     int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1762 
1763     if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1764                              true, env->virt_enabled, true, false)) {
1765         return -1;
1766     }
1767 
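    /*
     * With virtualization enabled, the result above is a guest physical
     * address, so run it through the G-stage as well to get the host
     * physical address.
     */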
1768     if (env->virt_enabled) {
1769         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1770                                  0, MMUIdx_U, false, true, true, false)) {
1771             return -1;
1772         }
1773     }
1774 
1775     return phys_addr & TARGET_PAGE_MASK;
1776 }
1777 
1778 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1779                                      vaddr addr, unsigned size,
1780                                      MMUAccessType access_type,
1781                                      int mmu_idx, MemTxAttrs attrs,
1782                                      MemTxResult response, uintptr_t retaddr)
1783 {
1784     RISCVCPU *cpu = RISCV_CPU(cs);
1785     CPURISCVState *env = &cpu->env;
1786 
1787     if (access_type == MMU_DATA_STORE) {
1788         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1789     } else if (access_type == MMU_DATA_LOAD) {
1790         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1791     } else {
1792         cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1793     }
1794 
1795     env->badaddr = addr;
1796     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1797     env->two_stage_indirect_lookup = false;
1798     cpu_loop_exit_restore(cs, retaddr);
1799 }
1800 
1801 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1802                                    MMUAccessType access_type, int mmu_idx,
1803                                    uintptr_t retaddr)
1804 {
1805     RISCVCPU *cpu = RISCV_CPU(cs);
1806     CPURISCVState *env = &cpu->env;
1807     switch (access_type) {
1808     case MMU_INST_FETCH:
1809         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1810         break;
1811     case MMU_DATA_LOAD:
1812         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1813         /* shadow stack misaligned accesses are access faults */
1814         if (mmu_idx & MMU_IDX_SS_WRITE) {
1815             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1816         }
1817         break;
1818     case MMU_DATA_STORE:
1819         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1820         /* shadow stack mis aligned accesses are access faults */
1821         /* shadow stack misaligned accesses are access faults */
1822             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1823         }
1824         break;
1825     default:
1826         g_assert_not_reached();
1827     }
1828     env->badaddr = addr;
1829     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1830     env->two_stage_indirect_lookup = false;
1831     cpu_loop_exit_restore(cs, retaddr);
1832 }
1833 
1834 
1835 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1836 {
1837     enum riscv_pmu_event_idx pmu_event_type;
1838 
1839     switch (access_type) {
1840     case MMU_INST_FETCH:
1841         pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1842         break;
1843     case MMU_DATA_LOAD:
1844         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1845         break;
1846     case MMU_DATA_STORE:
1847         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1848         break;
1849     default:
1850         return;
1851     }
1852 
1853     riscv_pmu_incr_ctr(cpu, pmu_event_type);
1854 }
1855 
1856 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1857                         MMUAccessType access_type, int mmu_idx,
1858                         bool probe, uintptr_t retaddr)
1859 {
1860     RISCVCPU *cpu = RISCV_CPU(cs);
1861     CPURISCVState *env = &cpu->env;
1862     vaddr im_address;
1863     hwaddr pa = 0;
1864     int prot, prot2, prot_pmp;
1865     bool pmp_violation = false;
1866     bool first_stage_error = true;
1867     bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1868     bool two_stage_indirect_error = false;
1869     int ret = TRANSLATE_FAIL;
1870     int mode = mmuidx_priv(mmu_idx);
1871     /* default TLB page size */
1872     hwaddr tlb_size = TARGET_PAGE_SIZE;
1873 
1874     env->guest_phys_fault_addr = 0;
1875 
1876     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1877                   __func__, address, access_type, mmu_idx);
1878 
1879     pmu_tlb_fill_incr_ctr(cpu, access_type);
1880     if (two_stage_lookup) {
1881         /* Two stage lookup */
1882         ret = get_physical_address(env, &pa, &prot, address,
1883                                    &env->guest_phys_fault_addr, access_type,
1884                                    mmu_idx, true, true, false, probe);
1885 
1886         /*
1887          * A G-stage exception may be triggered during the two-stage lookup,
1888          * in which case env->guest_phys_fault_addr has already been set in
1889          * get_physical_address().
1890          */
1891         if (ret == TRANSLATE_G_STAGE_FAIL) {
1892             first_stage_error = false;
1893             two_stage_indirect_error = true;
1894         }
1895 
1896         qemu_log_mask(CPU_LOG_MMU,
1897                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1898                       HWADDR_FMT_plx " prot %d\n",
1899                       __func__, address, ret, pa, prot);
1900 
1901         if (ret == TRANSLATE_SUCCESS) {
1902             /* Second stage lookup */
1903             im_address = pa;
1904 
1905             ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1906                                        access_type, MMUIdx_U, false, true,
1907                                        false, probe);
1908 
1909             qemu_log_mask(CPU_LOG_MMU,
1910                           "%s 2nd-stage address=%" VADDR_PRIx
1911                           " ret %d physical "
1912                           HWADDR_FMT_plx " prot %d\n",
1913                           __func__, im_address, ret, pa, prot2);
1914 
1915             prot &= prot2;
1916 
1917             if (ret == TRANSLATE_SUCCESS) {
1918                 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1919                                                size, access_type, mode);
1920                 tlb_size = pmp_get_tlb_size(env, pa);
1921 
1922                 qemu_log_mask(CPU_LOG_MMU,
1923                               "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1924                               " %d tlb_size %" HWADDR_PRIu "\n",
1925                               __func__, pa, ret, prot_pmp, tlb_size);
1926 
1927                 prot &= prot_pmp;
1928             } else {
1929                 /*
1930                  * Guest physical address translation failed; this is an
1931                  * HS-level exception.
1932                  */
1933                 first_stage_error = false;
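                /*
                 * Unless this was a PMP failure, record the faulting guest
                 * physical address; htval/mtval2 report it right-shifted
                 * by 2 bits, as required by the hypervisor extension.
                 */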
1934                 if (ret != TRANSLATE_PMP_FAIL) {
1935                     env->guest_phys_fault_addr = (im_address |
1936                                                   (address &
1937                                                    (TARGET_PAGE_SIZE - 1))) >> 2;
1938                 }
1939             }
1940         }
1941     } else {
1942         /* Single stage lookup */
1943         ret = get_physical_address(env, &pa, &prot, address, NULL,
1944                                    access_type, mmu_idx, true, false, false,
1945                                    probe);
1946 
1947         qemu_log_mask(CPU_LOG_MMU,
1948                       "%s address=%" VADDR_PRIx " ret %d physical "
1949                       HWADDR_FMT_plx " prot %d\n",
1950                       __func__, address, ret, pa, prot);
1951 
1952         if (ret == TRANSLATE_SUCCESS) {
1953             ret = get_physical_address_pmp(env, &prot_pmp, pa,
1954                                            size, access_type, mode);
1955             tlb_size = pmp_get_tlb_size(env, pa);
1956 
1957             qemu_log_mask(CPU_LOG_MMU,
1958                           "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1959                           " %d tlb_size %" HWADDR_PRIu "\n",
1960                           __func__, pa, ret, prot_pmp, tlb_size);
1961 
1962             prot &= prot_pmp;
1963         }
1964     }
1965 
1966     if (ret == TRANSLATE_PMP_FAIL) {
1967         pmp_violation = true;
1968     }
1969 
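    /*
     * On success, insert the translation into the TLB. The PMP-derived
     * tlb_size may be smaller than TARGET_PAGE_SIZE when a PMP region
     * covers only part of the page.
     */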
1970     if (ret == TRANSLATE_SUCCESS) {
1971         tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1972                      prot, mmu_idx, tlb_size);
1973         return true;
1974     } else if (probe) {
1975         return false;
1976     } else {
1977         int wp_access = 0;
1978 
1979         if (access_type == MMU_DATA_LOAD) {
1980             wp_access |= BP_MEM_READ;
1981         } else if (access_type == MMU_DATA_STORE) {
1982             wp_access |= BP_MEM_WRITE;
1983         }
1984 
1985         /*
1986          * If a watchpoint isn't found for 'addr' this will
1987          * be a no-op and we'll resume the mmu_exception path.
1988          * Otherwise we'll throw a debug exception and execution
1989          * will continue elsewhere.
1990          */
1991         cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
1992                              wp_access, retaddr);
1993 
1994         raise_mmu_exception(env, address, access_type, pmp_violation,
1995                             first_stage_error, two_stage_lookup,
1996                             two_stage_indirect_error);
1997         cpu_loop_exit_restore(cs, retaddr);
1998     }
1999 
2000     return true;
2001 }
2002 
2003 static target_ulong riscv_transformed_insn(CPURISCVState *env,
2004                                            target_ulong insn,
2005                                            target_ulong taddr)
2006 {
2007     target_ulong xinsn = 0;
2008     target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
2009 
2010     /*
2011      * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to
2012      * be uncompressed. Quadrant 1 of the RVC instruction space need
2013      * not be transformed because its instructions won't generate
2014      * any load/store trap.
2015      */
2016 
2017     if ((insn & 0x3) != 0x3) {
2018         /* Transform a 16-bit instruction into its 32-bit equivalent */
2019         switch (GET_C_OP(insn)) {
2020         case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
2021             switch (GET_C_FUNC(insn)) {
2022             case OPC_RISC_C_FUNC_FLD_LQ:
2023                 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
2024                     xinsn = OPC_RISC_FLD;
2025                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2026                     access_rs1 = GET_C_RS1S(insn);
2027                     access_imm = GET_C_LD_IMM(insn);
2028                     access_size = 8;
2029                 }
2030                 break;
2031             case OPC_RISC_C_FUNC_LW: /* C.LW */
2032                 xinsn = OPC_RISC_LW;
2033                 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2034                 access_rs1 = GET_C_RS1S(insn);
2035                 access_imm = GET_C_LW_IMM(insn);
2036                 access_size = 4;
2037                 break;
2038             case OPC_RISC_C_FUNC_FLW_LD:
2039                 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
2040                     xinsn = OPC_RISC_FLW;
2041                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2042                     access_rs1 = GET_C_RS1S(insn);
2043                     access_imm = GET_C_LW_IMM(insn);
2044                     access_size = 4;
2045                 } else { /* C.LD (RV64/RV128) */
2046                     xinsn = OPC_RISC_LD;
2047                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2048                     access_rs1 = GET_C_RS1S(insn);
2049                     access_imm = GET_C_LD_IMM(insn);
2050                     access_size = 8;
2051                 }
2052                 break;
2053             case OPC_RISC_C_FUNC_FSD_SQ:
2054                 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
2055                     xinsn = OPC_RISC_FSD;
2056                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2057                     access_rs1 = GET_C_RS1S(insn);
2058                     access_imm = GET_C_SD_IMM(insn);
2059                     access_size = 8;
2060                 }
2061                 break;
2062             case OPC_RISC_C_FUNC_SW: /* C.SW */
2063                 xinsn = OPC_RISC_SW;
2064                 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2065                 access_rs1 = GET_C_RS1S(insn);
2066                 access_imm = GET_C_SW_IMM(insn);
2067                 access_size = 4;
2068                 break;
2069             case OPC_RISC_C_FUNC_FSW_SD:
2070                 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
2071                     xinsn = OPC_RISC_FSW;
2072                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2073                     access_rs1 = GET_C_RS1S(insn);
2074                     access_imm = GET_C_SW_IMM(insn);
2075                     access_size = 4;
2076                 } else { /* C.SD (RV64/RV128) */
2077                     xinsn = OPC_RISC_SD;
2078                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2079                     access_rs1 = GET_C_RS1S(insn);
2080                     access_imm = GET_C_SD_IMM(insn);
2081                     access_size = 8;
2082                 }
2083                 break;
2084             default:
2085                 break;
2086             }
2087             break;
2088         case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
2089             switch (GET_C_FUNC(insn)) {
2090             case OPC_RISC_C_FUNC_FLDSP_LQSP:
2091                 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
2092                     xinsn = OPC_RISC_FLD;
2093                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2094                     access_rs1 = 2;
2095                     access_imm = GET_C_LDSP_IMM(insn);
2096                     access_size = 8;
2097                 }
2098                 break;
2099             case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
2100                 xinsn = OPC_RISC_LW;
2101                 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2102                 access_rs1 = 2;
2103                 access_imm = GET_C_LWSP_IMM(insn);
2104                 access_size = 4;
2105                 break;
2106             case OPC_RISC_C_FUNC_FLWSP_LDSP:
2107                 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
2108                     xinsn = OPC_RISC_FLW;
2109                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2110                     access_rs1 = 2;
2111                     access_imm = GET_C_LWSP_IMM(insn);
2112                     access_size = 4;
2113                 } else { /* C.LDSP (RV64/RV128) */
2114                     xinsn = OPC_RISC_LD;
2115                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
2116                     access_rs1 = 2;
2117                     access_imm = GET_C_LDSP_IMM(insn);
2118                     access_size = 8;
2119                 }
2120                 break;
2121             case OPC_RISC_C_FUNC_FSDSP_SQSP:
2122                 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
2123                     xinsn = OPC_RISC_FSD;
2124                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2125                     access_rs1 = 2;
2126                     access_imm = GET_C_SDSP_IMM(insn);
2127                     access_size = 8;
2128                 }
2129                 break;
2130             case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
2131                 xinsn = OPC_RISC_SW;
2132                 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2133                 access_rs1 = 2;
2134                 access_imm = GET_C_SWSP_IMM(insn);
2135                 access_size = 4;
2136                 break;
2137             case 7:
2138                 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
2139                     xinsn = OPC_RISC_FSW;
2140                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2141                     access_rs1 = 2;
2142                     access_imm = GET_C_SWSP_IMM(insn);
2143                     access_size = 4;
2144                 } else { /* C.SDSP (RV64/RV128) */
2145                     xinsn = OPC_RISC_SD;
2146                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2147                     access_rs1 = 2;
2148                     access_imm = GET_C_SDSP_IMM(insn);
2149                     access_size = 8;
2150                 }
2151                 break;
2152             default:
2153                 break;
2154             }
2155             break;
2156         default:
2157             break;
2158         }
2159 
2160         /*
2161          * Clear bit 1 of the transformed instruction to indicate that the
2162          * original instruction was a 16-bit instruction.
2163          */
2164         xinsn &= ~((target_ulong)0x2);
2165     } else {
2166         /* Transform 32-bit (or wider) instructions */
2167         switch (MASK_OP_MAJOR(insn)) {
2168         case OPC_RISC_ATOMIC:
2169             xinsn = insn;
2170             access_rs1 = GET_RS1(insn);
2171             access_size = 1 << GET_FUNCT3(insn);
2172             break;
2173         case OPC_RISC_LOAD:
2174         case OPC_RISC_FP_LOAD:
2175             xinsn = SET_I_IMM(insn, 0);
2176             access_rs1 = GET_RS1(insn);
2177             access_imm = GET_IMM(insn);
2178             access_size = 1 << GET_FUNCT3(insn);
2179             break;
2180         case OPC_RISC_STORE:
2181         case OPC_RISC_FP_STORE:
2182             xinsn = SET_S_IMM(insn, 0);
2183             access_rs1 = GET_RS1(insn);
2184             access_imm = GET_STORE_IMM(insn);
2185             access_size = 1 << GET_FUNCT3(insn);
2186             break;
2187         case OPC_RISC_SYSTEM:
2188             if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
2189                 xinsn = insn;
2190                 access_rs1 = GET_RS1(insn);
2191                 /* Bits [2:1] of funct7 encode the HLV/HSV access width */
2192                 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
2193             }
2194             break;
2195         default:
2196             break;
2197         }
2198     }
2199 
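    /*
     * Encode the "Addr. Offset" in the rs1 field of the transformed
     * instruction: the distance from the instruction's effective address
     * (rs1 + immediate) to the faulting address, which is non-zero only
     * for misaligned accesses.
     */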
2200     if (access_size) {
2201         xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
2202                                (access_size - 1));
2203     }
2204 
2205     return xinsn;
2206 }
2207 
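/*
 * Promote a load fault to the corresponding store/AMO fault. Used when
 * RISCV_UW2_ALWAYS_STORE_AMO is set, e.g. for shadow stack accesses whose
 * faulting loads must be reported as store faults.
 */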
2208 static target_ulong promote_load_fault(target_ulong orig_cause)
2209 {
2210     switch (orig_cause) {
2211     case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2212         return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
2213 
2214     case RISCV_EXCP_LOAD_ACCESS_FAULT:
2215         return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
2216 
2217     case RISCV_EXCP_LOAD_PAGE_FAULT:
2218         return RISCV_EXCP_STORE_PAGE_FAULT;
2219     }
2220 
2221     /* if no promotion, return original cause */
2222     return orig_cause;
2223 }
2224 
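/*
 * Take a resumable NMI (Smrnmi): save the interrupted context in the
 * mn* CSRs, jump to the RNMI interrupt vector and return to M-mode with
 * virtualization disabled.
 */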
2225 static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
2226 {
2227     env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
2228     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
2229     env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
2230     env->mncause = cause;
2231     env->mnepc = env->pc;
2232     env->pc = env->rnmi_irqvec;
2233 
2234     if (cpu_get_fcfien(env)) {
2235         env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
2236     }
2237 
2238     /* Trapping to M mode, virt is disabled */
2239     riscv_cpu_set_mode(env, PRV_M, false);
2240 }
2241 
2242 /*
2243  * Handle Traps
2244  *
2245  * Adapted from Spike's processor_t::take_trap.
2246  *
2247  */
2248 void riscv_cpu_do_interrupt(CPUState *cs)
2249 {
2250     RISCVCPU *cpu = RISCV_CPU(cs);
2251     CPURISCVState *env = &cpu->env;
2252     bool virt = env->virt_enabled;
2253     bool write_gva = false;
2254     bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
2255     bool vsmode_exc;
2256     uint64_t s;
2257     int mode;
2258 
2259     /*
2260      * cs->exception_index is 32 bits wide, unlike the XLEN-wide mcause,
2261      * so we mask off the MSB and separate into trap type and cause.
2262      */
2263     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
2264     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
2265     uint64_t deleg = async ? env->mideleg : env->medeleg;
2266     bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
2267         !(env->mip & (1ULL << cause));
2268     bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
2269         !(env->mip & (1ULL << cause));
2270     bool smode_double_trap = false;
2271     uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
2272     const bool prev_virt = env->virt_enabled;
2273     const target_ulong prev_priv = env->priv;
2274     target_ulong tval = 0;
2275     target_ulong tinst = 0;
2276     target_ulong htval = 0;
2277     target_ulong mtval2 = 0;
2278     target_ulong src;
2279     int sxlen = 0;
2280     int mxlen = 16 << riscv_cpu_mxl(env);
2281     bool nnmi_excep = false;
2282 
2283     if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
2284         riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
2285                      env->virt_enabled);
2286         return;
2287     }
2288 
2289     if (!async) {
2290         /* set tval to badaddr for traps with address information */
2291         switch (cause) {
2292 #ifdef CONFIG_TCG
2293         case RISCV_EXCP_SEMIHOST:
2294             do_common_semihosting(cs);
2295             env->pc += 4;
2296             return;
2297 #endif
2298         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2299         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
2300         case RISCV_EXCP_LOAD_ADDR_MIS:
2301         case RISCV_EXCP_STORE_AMO_ADDR_MIS:
2302         case RISCV_EXCP_LOAD_ACCESS_FAULT:
2303         case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
2304         case RISCV_EXCP_LOAD_PAGE_FAULT:
2305         case RISCV_EXCP_STORE_PAGE_FAULT:
2306             if (always_storeamo) {
2307                 cause = promote_load_fault(cause);
2308             }
2309             write_gva = env->two_stage_lookup;
2310             tval = env->badaddr;
2311             if (env->two_stage_indirect_lookup) {
2312                 /*
2313                  * special pseudoinstruction for G-stage fault taken while
2314                  * doing VS-stage page table walk.
2315                  */
2316                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2317             } else {
2318                 /*
2319                  * The "Addr. Offset" field in transformed instruction is
2320                  * non-zero only for misaligned access.
2321                  */
2322                 tinst = riscv_transformed_insn(env, env->bins, tval);
2323             }
2324             break;
2325         case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
2326         case RISCV_EXCP_INST_ADDR_MIS:
2327         case RISCV_EXCP_INST_ACCESS_FAULT:
2328         case RISCV_EXCP_INST_PAGE_FAULT:
2329             write_gva = env->two_stage_lookup;
2330             tval = env->badaddr;
2331             if (env->two_stage_indirect_lookup) {
2332                 /*
2333                  * special pseudoinstruction for G-stage fault taken while
2334                  * doing VS-stage page table walk.
2335                  */
2336                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2337             }
2338             break;
2339         case RISCV_EXCP_ILLEGAL_INST:
2340         case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
2341             tval = env->bins;
2342             break;
2343         case RISCV_EXCP_BREAKPOINT:
2344             tval = env->badaddr;
2345             if (cs->watchpoint_hit) {
2346                 tval = cs->watchpoint_hit->hitaddr;
2347                 cs->watchpoint_hit = NULL;
2348             }
2349             break;
2350         case RISCV_EXCP_SW_CHECK:
2351             tval = env->sw_check_code;
2352             break;
2353         default:
2354             break;
2355         }
2356         /* ecall is dispatched as one cause so translate based on mode */
2357         if (cause == RISCV_EXCP_U_ECALL) {
2358             assert(env->priv <= 3);
2359 
2360             if (env->priv == PRV_M) {
2361                 cause = RISCV_EXCP_M_ECALL;
2362             } else if (env->priv == PRV_S && env->virt_enabled) {
2363                 cause = RISCV_EXCP_VS_ECALL;
2364             } else if (env->priv == PRV_S && !env->virt_enabled) {
2365                 cause = RISCV_EXCP_S_ECALL;
2366             } else if (env->priv == PRV_U) {
2367                 cause = RISCV_EXCP_U_ECALL;
2368             }
2369         }
2370     }
2371 
2372     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
2373                      riscv_cpu_get_trap_name(cause, async));
2374 
2375     qemu_log_mask(CPU_LOG_INT,
2376                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
2377                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
2378                   __func__, env->mhartid, async, cause, env->pc, tval,
2379                   riscv_cpu_get_trap_name(cause, async));
2380 
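    /*
     * Pick the target privilege mode: traps taken from U/S-mode go to
     * S-mode when the corresponding medeleg/mideleg bit is set, or when
     * the interrupt was injected via mvien/hvien; otherwise they are
     * handled in M-mode.
     */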
2381     mode = env->priv <= PRV_S && cause < 64 &&
2382         (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
2383 
2384     vsmode_exc = env->virt_enabled && cause < 64 &&
2385         (((hdeleg >> cause) & 1) || vs_injected);
2386 
2387     /*
2388      * Check double trap condition only if already in S-mode and targeting
2389      * S-mode
2390      */
2391     if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
2392         bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
2393         bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
2394         /* In VS or HS */
2395         if (riscv_has_ext(env, RVH)) {
2396             if (vsmode_exc) {
2397                 /* VS -> VS, use henvcfg instead of menvcfg */
2398                 dte = (env->henvcfg & HENVCFG_DTE) != 0;
2399             } else if (env->virt_enabled) {
2400                 /* VS -> HS, use mstatus_hs */
2401                 sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
2402             }
2403         }
2404         smode_double_trap = dte && sdt;
2405         if (smode_double_trap) {
2406             mode = PRV_M;
2407         }
2408     }
2409 
2410     if (mode == PRV_S) {
2411         /* handle the trap in S-mode */
2412         /* save elp status */
2413         if (cpu_get_fcfien(env)) {
2414             env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
2415         }
2416 
2417         if (riscv_has_ext(env, RVH)) {
2418             if (vsmode_exc) {
2419                 /* Trap to VS mode */
2420                 /*
2421                  * See if we need to adjust cause: yes if it is a VS-mode
2422                  * interrupt, no if the hypervisor delegated an HS-mode interrupt.
2423                  */
2424                 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
2425                               cause == IRQ_VS_EXT)) {
2426                     cause = cause - 1;
2427                 }
2428                 write_gva = false;
2429             } else if (env->virt_enabled) {
2430                 /* Trap into HS mode, from virt */
2431                 riscv_cpu_swap_hypervisor_regs(env);
2432                 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
2433                                          env->priv);
2434                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
2435 
2436                 htval = env->guest_phys_fault_addr;
2437 
2438                 virt = false;
2439             } else {
2440                 /* Trap into HS mode */
2441                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
2442                 htval = env->guest_phys_fault_addr;
2443             }
2444             env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
2445         }
2446 
2447         s = env->mstatus;
2448         s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
2449         s = set_field(s, MSTATUS_SPP, env->priv);
2450         s = set_field(s, MSTATUS_SIE, 0);
2451         if (riscv_env_smode_dbltrp_enabled(env, virt)) {
2452             s = set_field(s, MSTATUS_SDT, 1);
2453         }
2454         env->mstatus = s;
2455         sxlen = 16 << riscv_cpu_sxl(env);
2456         env->scause = cause | ((target_ulong)async << (sxlen - 1));
2457         env->sepc = env->pc;
2458         env->stval = tval;
2459         env->htval = htval;
2460         env->htinst = tinst;
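        /*
         * stvec[1:0] selects the trap vector mode; in vectored mode (1),
         * asynchronous traps jump to base + 4 * cause.
         */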
2461         env->pc = (env->stvec >> 2 << 2) +
2462                   ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
2463         riscv_cpu_set_mode(env, PRV_S, virt);
2464 
2465         src = env->sepc;
2466     } else {
2467         /*
2468          * If the hart encounters an exception while executing in M-mode
2469          * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
2470          */
2471         nnmi_excep = cpu->cfg.ext_smrnmi &&
2472                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2473                      !async;
2474 
2475         /* handle the trap in M-mode */
2476         /* save elp status */
2477         if (cpu_get_fcfien(env)) {
2478             if (nnmi_excep) {
2479                 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
2480                                           env->elp);
2481             } else {
2482                 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
2483             }
2484         }
2485 
2486         if (riscv_has_ext(env, RVH)) {
2487             if (env->virt_enabled) {
2488                 riscv_cpu_swap_hypervisor_regs(env);
2489             }
2490             env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
2491                                      env->virt_enabled);
2492             if (env->virt_enabled && tval) {
2493                 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
2494             }
2495 
2496             mtval2 = env->guest_phys_fault_addr;
2497 
2498             /* Trapping to M mode, virt is disabled */
2499             virt = false;
2500         }
2501         /*
2502          * If the hart encounters an exception while executing in M-mode,
2503          * with the mnstatus.NMIE bit clear, the program counter is set to
2504          * the RNMI exception trap handler address.
2505          */
2506         nnmi_excep = cpu->cfg.ext_smrnmi &&
2507                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2508                      !async;
2509 
2510         s = env->mstatus;
2511         s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
2512         s = set_field(s, MSTATUS_MPP, env->priv);
2513         s = set_field(s, MSTATUS_MIE, 0);
2514         if (cpu->cfg.ext_smdbltrp) {
2515             if (env->mstatus & MSTATUS_MDT) {
2516                 assert(env->priv == PRV_M);
2517                 if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
2518                     cpu_abort(CPU(cpu), "M-mode double trap\n");
2519                 } else {
2520                     riscv_do_nmi(env, cause, false);
2521                     return;
2522                 }
2523             }
2524 
2525             s = set_field(s, MSTATUS_MDT, 1);
2526         }
2527         env->mstatus = s;
2528         env->mcause = cause | ((target_ulong)async << (mxlen - 1));
2529         if (smode_double_trap) {
2530             env->mtval2 = env->mcause;
2531             env->mcause = RISCV_EXCP_DOUBLE_TRAP;
2532         } else {
2533             env->mtval2 = mtval2;
2534         }
2535         env->mepc = env->pc;
2536         env->mtval = tval;
2537         env->mtinst = tinst;
2538 
2539         /*
2540          * For RNMI exception, program counter is set to the RNMI exception
2541          * trap handler address.
2542          */
2543         if (nnmi_excep) {
2544             env->pc = env->rnmi_excpvec;
2545         } else {
2546             env->pc = (env->mtvec >> 2 << 2) +
2547                       ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
2548         }
2549         riscv_cpu_set_mode(env, PRV_M, virt);
2550         src = env->mepc;
2551     }
2552 
2553     if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
2554         if (async && cause == IRQ_PMU_OVF) {
2555             riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
2556         } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
2557             riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
2558         }
2559 
2560         riscv_ctr_add_entry(env, src, env->pc,
2561                         async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
2562                         prev_priv, prev_virt);
2563     }
2564 
2565     /*
2566      * Interrupt/exception/trap delivery is an asynchronous event and, as per
2567      * the zicfilp spec, the CPU should clear the ELP state. No harm in clearing
2568      * unconditionally.
2569      */
2570     env->elp = false;
2571 
2572     /*
2573      * NOTE: it is not necessary to yield load reservations here. It is only
2574      * necessary for an SC from "another hart" to cause a load reservation
2575      * to be yielded. Refer to the memory consistency model section of the
2576      * RISC-V ISA Specification.
2577      */
2578 
2579     env->two_stage_lookup = false;
2580     env->two_stage_indirect_lookup = false;
2581 }
2582 
2583 #endif /* !CONFIG_USER_ONLY */
2584