xref: /qemu/target/riscv/cpu_helper.c (revision 7703a1d1e6479084d58ee3106a3c8a72ed7357eb)
1 /*
2  * RISC-V CPU helpers for qemu.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/exec-all.h"
27 #include "exec/page-protection.h"
28 #include "instmap.h"
29 #include "tcg/tcg-op.h"
30 #include "trace.h"
31 #include "semihosting/common-semi.h"
32 #include "system/cpu-timers.h"
33 #include "cpu_bits.h"
34 #include "debug.h"
35 #include "tcg/oversized-guest.h"
36 #include "pmp.h"
37 
38 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
39 {
40 #ifdef CONFIG_USER_ONLY
41     return 0;
42 #else
43     bool virt = env->virt_enabled;
44     int mode = env->priv;
45 
46     /* All priv -> mmu_idx mappings are here */
47     if (!ifetch) {
48         uint64_t status = env->mstatus;
49 
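        /*
         * MPRV=1 in M-mode: data accesses use the privilege saved in
         * mstatus.MPP, and MPV selects the two-stage (virtualized) address
         * space when MPP is not M.
         */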
50         if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
51             mode = get_field(env->mstatus, MSTATUS_MPP);
52             virt = get_field(env->mstatus, MSTATUS_MPV) &&
53                    (mode != PRV_M);
54             if (virt) {
55                 status = env->vsstatus;
56             }
57         }
58         if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
59             mode = MMUIdx_S_SUM;
60         }
61     }
62 
63     return mode | (virt ? MMU_2STAGE_BIT : 0);
64 #endif
65 }
66 
67 bool cpu_get_fcfien(CPURISCVState *env)
68 {
69     /* no cfi extension, return false */
70     if (!env_archcpu(env)->cfg.ext_zicfilp) {
71         return false;
72     }
73 
74     switch (env->priv) {
75     case PRV_U:
76         if (riscv_has_ext(env, RVS)) {
77             return env->senvcfg & SENVCFG_LPE;
78         }
79         return env->menvcfg & MENVCFG_LPE;
80 #ifndef CONFIG_USER_ONLY
81     case PRV_S:
82         if (env->virt_enabled) {
83             return env->henvcfg & HENVCFG_LPE;
84         }
85         return env->menvcfg & MENVCFG_LPE;
86     case PRV_M:
87         return env->mseccfg & MSECCFG_MLPE;
88 #endif
89     default:
90         g_assert_not_reached();
91     }
92 }
93 
94 bool cpu_get_bcfien(CPURISCVState *env)
95 {
96     /* no cfi extension, return false */
97     if (!env_archcpu(env)->cfg.ext_zicfiss) {
98         return false;
99     }
100 
101     switch (env->priv) {
102     case PRV_U:
103         /*
104          * If S is not implemented then the shadow stack for U can't be turned on.
105          * This is checked in `riscv_cpu_validate_set_extensions`, so there is no
106          * need to check or assert here.
107          */
108         return env->senvcfg & SENVCFG_SSE;
109 #ifndef CONFIG_USER_ONLY
110     case PRV_S:
111         if (env->virt_enabled) {
112             return env->henvcfg & HENVCFG_SSE;
113         }
114         return env->menvcfg & MENVCFG_SSE;
115     case PRV_M: /* M-mode shadow stack is always off */
116         return false;
117 #endif
118     default:
119         g_assert_not_reached();
120     }
121 }
122 
123 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
124                           uint64_t *cs_base, uint32_t *pflags)
125 {
126     RISCVCPU *cpu = env_archcpu(env);
127     RISCVExtStatus fs, vs;
128     uint32_t flags = 0;
129     bool pm_signext = riscv_cpu_virt_mem_enabled(env);
130 
131     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
132     *cs_base = 0;
133 
134     if (cpu->cfg.ext_zve32x) {
135         /*
136          * If env->vl equals VLMAX, we can use the generic vector operation
137          * expanders (GVEC) to accelerate the vector operations.
138          * However, as LMUL could be a fractional number, the maximum
139          * vector size that can be operated on might be less than 8 bytes,
140          * which is not supported by GVEC. So we set the vl_eq_vlmax flag to
141          * true only when maxsz >= 8 bytes.
142          */
143 
144         /* lmul encoded as in DisasContext::lmul */
145         int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
146         uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
147         uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
148         uint32_t maxsz = vlmax << vsew;
149         bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
150                            (maxsz >= 8);
151         flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
152         flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
153         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
154                            FIELD_EX64(env->vtype, VTYPE, VLMUL));
155         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
156         flags = FIELD_DP32(flags, TB_FLAGS, VTA,
157                            FIELD_EX64(env->vtype, VTYPE, VTA));
158         flags = FIELD_DP32(flags, TB_FLAGS, VMA,
159                            FIELD_EX64(env->vtype, VTYPE, VMA));
160         flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
161     } else {
162         flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
163     }
164 
165     if (cpu_get_fcfien(env)) {
166         /*
167          * For Forward CFI, only the expectation of a lpad at
168          * the start of the block is tracked via env->elp. env->elp
169          * is turned on during jalr translation.
170          */
171         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
172         flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
173     }
174 
175     if (cpu_get_bcfien(env)) {
176         flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
177     }
178 
179 #ifdef CONFIG_USER_ONLY
180     fs = EXT_STATUS_DIRTY;
181     vs = EXT_STATUS_DIRTY;
182 #else
183     flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
184 
185     flags |= riscv_env_mmu_index(env, 0);
186     fs = get_field(env->mstatus, MSTATUS_FS);
187     vs = get_field(env->mstatus, MSTATUS_VS);
188 
189     if (env->virt_enabled) {
190         flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
191         /*
192          * Merge DISABLED and !DIRTY states using MIN.
193          * We will set both fields when dirtying.
194          */
195         fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
196         vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
197     }
198 
199     /* With Zfinx, floating point is enabled/disabled by Smstateen. */
200     if (!riscv_has_ext(env, RVF)) {
201         fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
202              ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
203     }
204 
205     if (cpu->cfg.debug && !icount_enabled()) {
206         flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
207     }
208 #endif
209 
210     flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
211     flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
212     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
213     flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
214     flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
215     flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
216 
217     *pflags = flags;
218 }
219 
220 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
221 {
222 #ifndef CONFIG_USER_ONLY
223     int priv_mode = cpu_address_mode(env);
224 
225     if (get_field(env->mstatus, MSTATUS_MPRV) &&
226         get_field(env->mstatus, MSTATUS_MXR)) {
227         return PMM_FIELD_DISABLED;
228     }
229 
230     /* Get current PMM field */
231     switch (priv_mode) {
232     case PRV_M:
233         if (riscv_cpu_cfg(env)->ext_smmpm) {
234             return get_field(env->mseccfg, MSECCFG_PMM);
235         }
236         break;
237     case PRV_S:
238         if (riscv_cpu_cfg(env)->ext_smnpm) {
239             if (get_field(env->mstatus, MSTATUS_MPV)) {
240                 return get_field(env->henvcfg, HENVCFG_PMM);
241             } else {
242                 return get_field(env->menvcfg, MENVCFG_PMM);
243             }
244         }
245         break;
246     case PRV_U:
247         if (riscv_has_ext(env, RVS)) {
248             if (riscv_cpu_cfg(env)->ext_ssnpm) {
249                 return get_field(env->senvcfg, SENVCFG_PMM);
250             }
251         } else {
252             if (riscv_cpu_cfg(env)->ext_smnpm) {
253                 return get_field(env->menvcfg, MENVCFG_PMM);
254             }
255         }
256         break;
257     default:
258         g_assert_not_reached();
259     }
260     return PMM_FIELD_DISABLED;
261 #else
262     return PMM_FIELD_DISABLED;
263 #endif
264 }
265 
266 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
267 {
268 #ifndef CONFIG_USER_ONLY
269     int priv_mode = cpu_address_mode(env);
270 
271     if (priv_mode == PRV_U) {
272         return get_field(env->hstatus, HSTATUS_HUPMM);
273     } else {
274         if (get_field(env->hstatus, HSTATUS_SPVP)) {
275             return get_field(env->henvcfg, HENVCFG_PMM);
276         } else {
277             return get_field(env->senvcfg, SENVCFG_PMM);
278         }
279     }
280 #else
281     return PMM_FIELD_DISABLED;
282 #endif
283 }
284 
285 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
286 {
287 #ifndef CONFIG_USER_ONLY
288     int satp_mode = 0;
289     int priv_mode = cpu_address_mode(env);
290 
291     if (riscv_cpu_mxl(env) == MXL_RV32) {
292         satp_mode = get_field(env->satp, SATP32_MODE);
293     } else {
294         satp_mode = get_field(env->satp, SATP64_MODE);
295     }
296 
297     return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
298 #else
299     return false;
300 #endif
301 }
302 
303 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
304 {
305     switch (pmm) {
306     case PMM_FIELD_DISABLED:
307         return 0;
308     case PMM_FIELD_PMLEN7:
309         return 7;
310     case PMM_FIELD_PMLEN16:
311         return 16;
312     default:
313         g_assert_not_reached();
314     }
315 }
316 
317 #ifndef CONFIG_USER_ONLY
318 
319 /*
320  * HS-mode is allowed to configure priority only for the
321  * following VS-mode local interrupts:
322  *
323  * 0  (Reserved interrupt, reads as zero)
324  * 1  Supervisor software interrupt
325  * 4  (Reserved interrupt, reads as zero)
326  * 5  Supervisor timer interrupt
327  * 8  (Reserved interrupt, reads as zero)
328  * 13 (Reserved interrupt)
329  * 14 "
330  * 15 "
331  * 16 "
332  * 17 "
333  * 18 "
334  * 19 "
335  * 20 "
336  * 21 "
337  * 22 "
338  * 23 "
339  */
340 
341 static const int hviprio_index2irq[] = {
342     0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
343 static const int hviprio_index2rdzero[] = {
344     1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
345 
346 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
347 {
348     if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
349         return -EINVAL;
350     }
351 
352     if (out_irq) {
353         *out_irq = hviprio_index2irq[index];
354     }
355 
356     if (out_rdzero) {
357         *out_rdzero = hviprio_index2rdzero[index];
358     }
359 
360     return 0;
361 }
362 
363 /*
364  * Default priorities of local interrupts are defined in the
365  * RISC-V Advanced Interrupt Architecture specification.
366  *
367  * ----------------------------------------------------------------
368  *  Default  |
369  *  Priority | Major Interrupt Numbers
370  * ----------------------------------------------------------------
371  *  Highest  | 47, 23, 46, 45, 22, 44,
372  *           | 43, 21, 42, 41, 20, 40
373  *           |
374  *           | 11 (0b),  3 (03),  7 (07)
375  *           |  9 (09),  1 (01),  5 (05)
376  *           | 12 (0c)
377  *           | 10 (0a),  2 (02),  6 (06)
378  *           |
379  *           | 39, 19, 38, 37, 18, 36,
380  *  Lowest   | 35, 17, 34, 33, 16, 32
381  * ----------------------------------------------------------------
382  */
383 static const uint8_t default_iprio[64] = {
384     /* Custom interrupts 48 to 63 */
385     [63] = IPRIO_MMAXIPRIO,
386     [62] = IPRIO_MMAXIPRIO,
387     [61] = IPRIO_MMAXIPRIO,
388     [60] = IPRIO_MMAXIPRIO,
389     [59] = IPRIO_MMAXIPRIO,
390     [58] = IPRIO_MMAXIPRIO,
391     [57] = IPRIO_MMAXIPRIO,
392     [56] = IPRIO_MMAXIPRIO,
393     [55] = IPRIO_MMAXIPRIO,
394     [54] = IPRIO_MMAXIPRIO,
395     [53] = IPRIO_MMAXIPRIO,
396     [52] = IPRIO_MMAXIPRIO,
397     [51] = IPRIO_MMAXIPRIO,
398     [50] = IPRIO_MMAXIPRIO,
399     [49] = IPRIO_MMAXIPRIO,
400     [48] = IPRIO_MMAXIPRIO,
401 
402     /* Custom interrupts 24 to 31 */
403     [31] = IPRIO_MMAXIPRIO,
404     [30] = IPRIO_MMAXIPRIO,
405     [29] = IPRIO_MMAXIPRIO,
406     [28] = IPRIO_MMAXIPRIO,
407     [27] = IPRIO_MMAXIPRIO,
408     [26] = IPRIO_MMAXIPRIO,
409     [25] = IPRIO_MMAXIPRIO,
410     [24] = IPRIO_MMAXIPRIO,
411 
412     [47] = IPRIO_DEFAULT_UPPER,
413     [23] = IPRIO_DEFAULT_UPPER + 1,
414     [46] = IPRIO_DEFAULT_UPPER + 2,
415     [45] = IPRIO_DEFAULT_UPPER + 3,
416     [22] = IPRIO_DEFAULT_UPPER + 4,
417     [44] = IPRIO_DEFAULT_UPPER + 5,
418 
419     [43] = IPRIO_DEFAULT_UPPER + 6,
420     [21] = IPRIO_DEFAULT_UPPER + 7,
421     [42] = IPRIO_DEFAULT_UPPER + 8,
422     [41] = IPRIO_DEFAULT_UPPER + 9,
423     [20] = IPRIO_DEFAULT_UPPER + 10,
424     [40] = IPRIO_DEFAULT_UPPER + 11,
425 
426     [11] = IPRIO_DEFAULT_M,
427     [3]  = IPRIO_DEFAULT_M + 1,
428     [7]  = IPRIO_DEFAULT_M + 2,
429 
430     [9]  = IPRIO_DEFAULT_S,
431     [1]  = IPRIO_DEFAULT_S + 1,
432     [5]  = IPRIO_DEFAULT_S + 2,
433 
434     [12] = IPRIO_DEFAULT_SGEXT,
435 
436     [10] = IPRIO_DEFAULT_VS,
437     [2]  = IPRIO_DEFAULT_VS + 1,
438     [6]  = IPRIO_DEFAULT_VS + 2,
439 
440     [39] = IPRIO_DEFAULT_LOWER,
441     [19] = IPRIO_DEFAULT_LOWER + 1,
442     [38] = IPRIO_DEFAULT_LOWER + 2,
443     [37] = IPRIO_DEFAULT_LOWER + 3,
444     [18] = IPRIO_DEFAULT_LOWER + 4,
445     [36] = IPRIO_DEFAULT_LOWER + 5,
446 
447     [35] = IPRIO_DEFAULT_LOWER + 6,
448     [17] = IPRIO_DEFAULT_LOWER + 7,
449     [34] = IPRIO_DEFAULT_LOWER + 8,
450     [33] = IPRIO_DEFAULT_LOWER + 9,
451     [16] = IPRIO_DEFAULT_LOWER + 10,
452     [32] = IPRIO_DEFAULT_LOWER + 11,
453 };
454 
455 uint8_t riscv_cpu_default_priority(int irq)
456 {
457     if (irq < 0 || irq > 63) {
458         return IPRIO_MMAXIPRIO;
459     }
460 
461     return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
462 }
463 
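/*
 * Given the set of pending-and-enabled interrupts, pick the one to take.
 * Without AIA (Smaia/Ssaia) this is simply the lowest pending bit. With
 * AIA, the priority array @iprio and the architectural default priorities
 * are consulted, with @extirq_def_prio used for the external interrupt
 * @extirq. Returns RISCV_EXCP_NONE if nothing is pending.
 */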
464 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
465                                     int extirq, unsigned int extirq_def_prio,
466                                     uint64_t pending, uint8_t *iprio)
467 {
468     int irq, best_irq = RISCV_EXCP_NONE;
469     unsigned int prio, best_prio = UINT_MAX;
470 
471     if (!pending) {
472         return RISCV_EXCP_NONE;
473     }
474 
475     irq = ctz64(pending);
476     if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
477                                   riscv_cpu_cfg(env)->ext_ssaia)) {
478         return irq;
479     }
480 
481     pending = pending >> irq;
482     while (pending) {
483         prio = iprio[irq];
484         if (!prio) {
485             if (irq == extirq) {
486                 prio = extirq_def_prio;
487             } else {
488                 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
489                        1 : IPRIO_MMAXIPRIO;
490             }
491         }
492         if ((pending & 0x1) && (prio <= best_prio)) {
493             best_irq = irq;
494             best_prio = prio;
495         }
496         irq++;
497         pending = pending >> 1;
498     }
499 
500     return best_irq;
501 }
502 
503 /*
504  * Doesn't report interrupts inserted using mvip from M-mode firmware or
505  * using hvip bits 13:63 from HS-mode. Those are returned in
506  * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
507  */
508 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
509 {
510     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
511     uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
512     uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
513 
514     return (env->mip | vsgein | vstip) & env->mie;
515 }
516 
517 int riscv_cpu_mirq_pending(CPURISCVState *env)
518 {
519     uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
520                     ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
521 
522     return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
523                                     irqs, env->miprio);
524 }
525 
526 int riscv_cpu_sirq_pending(CPURISCVState *env)
527 {
528     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
529                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
530     uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
531 
532     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
533                                     irqs | irqs_f, env->siprio);
534 }
535 
536 int riscv_cpu_vsirq_pending(CPURISCVState *env)
537 {
538     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
539     uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
540     uint64_t vsbits;
541 
542     /* Bring VS-level bits to correct position */
543     vsbits = irqs & VS_MODE_INTERRUPTS;
544     irqs &= ~VS_MODE_INTERRUPTS;
545     irqs |= vsbits >> 1;
546 
547     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
548                                     (irqs | irqs_f_vs), env->hviprio);
549 }
550 
551 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
552 {
553     uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
554     uint64_t vsbits, irq_delegated;
555     int virq;
556 
557     /* Priority: RNMI > Other interrupt. */
558     if (riscv_cpu_cfg(env)->ext_smrnmi) {
559         /* If mnstatus.NMIE == 0, all interrupts are disabled. */
560         if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
561             return RISCV_EXCP_NONE;
562         }
563 
564         if (env->rnmip) {
565             return ctz64(env->rnmip); /* since non-zero */
566         }
567     }
568 
569     /* Determine interrupt enable state of all privilege modes */
570     if (env->virt_enabled) {
571         mie = 1;
572         hsie = 1;
573         vsie = (env->priv < PRV_S) ||
574                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
575     } else {
576         mie = (env->priv < PRV_M) ||
577               (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
578         hsie = (env->priv < PRV_S) ||
579                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
580         vsie = 0;
581     }
582 
583     /* Determine all pending interrupts */
584     pending = riscv_cpu_all_pending(env);
585 
586     /* Check M-mode interrupts */
587     irqs = pending & ~env->mideleg & -mie;
588     if (irqs) {
589         return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
590                                         irqs, env->miprio);
591     }
592 
593     /* Check for virtual S-mode interrupts. */
594     irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
595 
596     /* Check HS-mode interrupts */
597     irqs =  ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
598     if (irqs) {
599         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
600                                         irqs, env->siprio);
601     }
602 
603     /* Check for virtual VS-mode interrupts. */
604     irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
605 
606     /* Check VS-mode interrupts */
607     irq_delegated = pending & env->mideleg & env->hideleg;
608 
609     /* Bring VS-level bits to correct position */
610     vsbits = irq_delegated & VS_MODE_INTERRUPTS;
611     irq_delegated &= ~VS_MODE_INTERRUPTS;
612     irq_delegated |= vsbits >> 1;
613 
614     irqs = (irq_delegated | irqs_f_vs) & -vsie;
615     if (irqs) {
616         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
617                                         irqs, env->hviprio);
618         if (virq <= 0 || (virq > 12 && virq <= 63)) {
619             return virq;
620         } else {
621             return virq + 1;
622         }
623     }
624 
625     /* Indicate no pending interrupt */
626     return RISCV_EXCP_NONE;
627 }
628 
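/*
 * Hook for the generic CPU loop: when CPU_INTERRUPT_HARD or
 * CPU_INTERRUPT_RNMI is requested, take the highest-priority locally
 * pending interrupt, if any, and deliver it via riscv_cpu_do_interrupt().
 */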
629 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
630 {
631     uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
632 
633     if (interrupt_request & mask) {
634         RISCVCPU *cpu = RISCV_CPU(cs);
635         CPURISCVState *env = &cpu->env;
636         int interruptno = riscv_cpu_local_irq_pending(env);
637         if (interruptno >= 0) {
638             cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
639             riscv_cpu_do_interrupt(cs);
640             return true;
641         }
642     }
643     return false;
644 }
645 
646 /* Return true if floating point support is currently enabled */
647 bool riscv_cpu_fp_enabled(CPURISCVState *env)
648 {
649     if (env->mstatus & MSTATUS_FS) {
650         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
651             return false;
652         }
653         return true;
654     }
655 
656     return false;
657 }
658 
659 /* Return true if vector support is currently enabled */
660 bool riscv_cpu_vector_enabled(CPURISCVState *env)
661 {
662     if (env->mstatus & MSTATUS_VS) {
663         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
664             return false;
665         }
666         return true;
667     }
668 
669     return false;
670 }
671 
672 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
673 {
674     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
675                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
676                             MSTATUS64_UXL | MSTATUS_VS;
677 
678     if (riscv_has_ext(env, RVF)) {
679         mstatus_mask |= MSTATUS_FS;
680     }
681     bool current_virt = env->virt_enabled;
682 
683     /*
684      * If the zicfilp extension is available and henvcfg.LPE = 1,
685      * then apply the SPELP mask on mstatus.
686      */
687     if (env_archcpu(env)->cfg.ext_zicfilp &&
688         get_field(env->henvcfg, HENVCFG_LPE)) {
689         mstatus_mask |= SSTATUS_SPELP;
690     }
691 
692     g_assert(riscv_has_ext(env, RVH));
693 
694     if (current_virt) {
695         /* Current V=1 and we are about to change to V=0 */
696         env->vsstatus = env->mstatus & mstatus_mask;
697         env->mstatus &= ~mstatus_mask;
698         env->mstatus |= env->mstatus_hs;
699 
700         env->vstvec = env->stvec;
701         env->stvec = env->stvec_hs;
702 
703         env->vsscratch = env->sscratch;
704         env->sscratch = env->sscratch_hs;
705 
706         env->vsepc = env->sepc;
707         env->sepc = env->sepc_hs;
708 
709         env->vscause = env->scause;
710         env->scause = env->scause_hs;
711 
712         env->vstval = env->stval;
713         env->stval = env->stval_hs;
714 
715         env->vsatp = env->satp;
716         env->satp = env->satp_hs;
717     } else {
718         /* Current V=0 and we are about to change to V=1 */
719         env->mstatus_hs = env->mstatus & mstatus_mask;
720         env->mstatus &= ~mstatus_mask;
721         env->mstatus |= env->vsstatus;
722 
723         env->stvec_hs = env->stvec;
724         env->stvec = env->vstvec;
725 
726         env->sscratch_hs = env->sscratch;
727         env->sscratch = env->vsscratch;
728 
729         env->sepc_hs = env->sepc;
730         env->sepc = env->vsepc;
731 
732         env->scause_hs = env->scause;
733         env->scause = env->vscause;
734 
735         env->stval_hs = env->stval;
736         env->stval = env->vstval;
737 
738         env->satp_hs = env->satp;
739         env->satp = env->vsatp;
740     }
741 }
742 
743 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
744 {
745     if (!riscv_has_ext(env, RVH)) {
746         return 0;
747     }
748 
749     return env->geilen;
750 }
751 
752 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
753 {
754     if (!riscv_has_ext(env, RVH)) {
755         return;
756     }
757 
758     if (geilen > (TARGET_LONG_BITS - 1)) {
759         return;
760     }
761 
762     env->geilen = geilen;
763 }
764 
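/*
 * Set or clear one RNMI (resumable NMI) input line and raise or lower
 * CPU_INTERRUPT_RNMI accordingly. The BQL is taken here if the caller
 * does not already hold it.
 */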
765 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
766 {
767     CPURISCVState *env = &cpu->env;
768     CPUState *cs = CPU(cpu);
769     bool release_lock = false;
770 
771     if (!bql_locked()) {
772         release_lock = true;
773         bql_lock();
774     }
775 
776     if (level) {
777         env->rnmip |= 1 << irq;
778         cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
779     } else {
780         env->rnmip &= ~(1 << irq);
781         cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
782     }
783 
784     if (release_lock) {
785         bql_unlock();
786     }
787 }
788 
789 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
790 {
791     CPURISCVState *env = &cpu->env;
792     if (env->miclaim & interrupts) {
793         return -1;
794     } else {
795         env->miclaim |= interrupts;
796         return 0;
797     }
798 }
799 
800 void riscv_cpu_interrupt(CPURISCVState *env)
801 {
802     uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
803     CPUState *cs = env_cpu(env);
804 
805     BQL_LOCK_GUARD();
806 
807     if (env->virt_enabled) {
808         gein = get_field(env->hstatus, HSTATUS_VGEIN);
809         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
810         irqf = env->hvien & env->hvip & env->vsie;
811     } else {
812         irqf = env->mvien & env->mvip & env->sie;
813     }
814 
815     vstip = env->vstime_irq ? MIP_VSTIP : 0;
816 
817     if (env->mip | vsgein | vstip | irqf) {
818         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
819     } else {
820         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
821     }
822 }
823 
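/*
 * Update the bits of mip selected by @mask to @value under the BQL and
 * re-evaluate the interrupt lines. Returns the previous mip value.
 */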
824 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
825 {
826     uint64_t old = env->mip;
827 
828     /* No need to update mip for VSTIP */
829     mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
830 
831     BQL_LOCK_GUARD();
832 
833     env->mip = (env->mip & ~mask) | (value & mask);
834 
835     riscv_cpu_interrupt(env);
836 
837     return old;
838 }
839 
840 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
841                              void *arg)
842 {
843     env->rdtime_fn = fn;
844     env->rdtime_fn_arg = arg;
845 }
846 
847 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
848                                    int (*rmw_fn)(void *arg,
849                                                  target_ulong reg,
850                                                  target_ulong *val,
851                                                  target_ulong new_val,
852                                                  target_ulong write_mask),
853                                    void *rmw_fn_arg)
854 {
855     if (priv <= PRV_M) {
856         env->aia_ireg_rmw_fn[priv] = rmw_fn;
857         env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
858     }
859 }
860 
861 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
862 {
863     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
864 
865     if (newpriv != env->priv || env->virt_enabled != virt_en) {
866         if (icount_enabled()) {
867             riscv_itrigger_update_priv(env);
868         }
869 
870         riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
871     }
872 
873     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
874     env->priv = newpriv;
875     env->xl = cpu_recompute_xl(env);
876 
877     /*
878      * Clear the load reservation - otherwise a reservation placed in one
879      * context/process can be used by another, resulting in an SC succeeding
880      * incorrectly. Version 2.2 of the ISA specification explicitly requires
881      * this behaviour, while later revisions say that the kernel "should" use
882      * an SC instruction to force the yielding of a load reservation on a
883      * preemptive context switch. As a result, do both.
884      */
885     env->load_res = -1;
886 
887     if (riscv_has_ext(env, RVH)) {
888         /* Flush the TLB on all virt mode changes. */
889         if (env->virt_enabled != virt_en) {
890             tlb_flush(env_cpu(env));
891         }
892 
893         env->virt_enabled = virt_en;
894         if (virt_en) {
895             /*
896              * The guest external interrupts from an interrupt controller are
897              * delivered only when the Guest/VM is running (i.e. V=1). This
898              * means any guest external interrupt which is triggered while the
899              * Guest/VM is not running (i.e. V=0) will be missed by QEMU,
900              * resulting in the guest responding sluggishly to serial console
901              * input and other I/O events.
902              *
903              * To solve this, we check and inject interrupt after setting V=1.
904              */
905             riscv_cpu_update_mip(env, 0, 0);
906         }
907     }
908 }
909 
910 /*
911  * get_physical_address_pmp - check PMP permission for this physical address
912  *
913  * Match the PMP region and check permission for this physical address and its
914  * TLB page. Returns 0 if the permission check was successful.
915  *
916  * @env: CPURISCVState
917  * @prot: The returned protection attributes
918  * @addr: The physical address whose permission is to be checked
919  * @access_type: The type of MMU access
920  * @mode: Indicates current privilege level.
921  */
922 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
923                                     int size, MMUAccessType access_type,
924                                     int mode)
925 {
926     pmp_priv_t pmp_priv;
927     bool pmp_has_privs;
928 
929     if (!riscv_cpu_cfg(env)->pmp) {
930         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
931         return TRANSLATE_SUCCESS;
932     }
933 
934     pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
935                                        &pmp_priv, mode);
936     if (!pmp_has_privs) {
937         *prot = 0;
938         return TRANSLATE_PMP_FAIL;
939     }
940 
941     *prot = pmp_priv_to_page_prot(pmp_priv);
942 
943     return TRANSLATE_SUCCESS;
944 }
945 
946 /* Returns 'true' if a svukte address check is needed */
947 static bool do_svukte_check(CPURISCVState *env, bool first_stage,
948                              int mode, bool virt)
949 {
950     /* Svukte extension depends on Sv39. */
951     if (!(env_archcpu(env)->cfg.ext_svukte ||
952         !first_stage ||
953         VM_1_10_SV39 != get_field(env->satp, SATP64_MODE))) {
954         return false;
955     }
956 
957     /*
958      * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
959      * executing HLV/HLVX/HSV in U-mode.
960      * For other cases, check senvcfg.UKTE.
961      */
962     if (env->priv == PRV_U && !env->virt_enabled && virt) {
963         if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
964             return false;
965         }
966     } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
967         return false;
968     }
969 
970     /*
971      * Svukte extension is qualified only in U or VU-mode.
972      *
973      * Effective mode can be switched to U or VU-mode by:
974      *   - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
975      *   - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
976      *   - U-mode.
977      *   - VU-mode.
978      *   - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
979      */
980     if (mode != PRV_U) {
981         return false;
982     }
983 
984     return true;
985 }
986 
987 static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
988 {
989     /* svukte extension excludes RV32 */
990     uint32_t sxlen = 32 * riscv_cpu_sxl(env);
991     uint64_t high_bit = addr & (1UL << (sxlen - 1));
992     return !high_bit;
993 }
994 
995 /*
996  * get_physical_address - get the physical address for this virtual address
997  *
998  * Do a page table walk to obtain the physical address corresponding to a
999  * virtual address. Returns 0 if the translation was successful
1000  *
1001  * Adapted from Spike's mmu_t::translate and mmu_t::walk
1002  *
1003  * @env: CPURISCVState
1004  * @physical: This will be set to the calculated physical address
1005  * @prot: The returned protection attributes
1006  * @addr: The virtual address or guest physical address to be translated
1007  * @fault_pte_addr: If not NULL, this will be set to the fault PTE address
1008  *                  when an error occurs during PTE address translation.
1009  *                  This will already be shifted to match htval.
1010  * @access_type: The type of MMU access
1011  * @mmu_idx: Indicates current privilege level
1012  * @first_stage: Are we in first stage translation?
1013  *               Second stage is used for hypervisor guest translation
1014  * @two_stage: Are we going to perform two stage translation
1015  * @is_debug: Is this access from a debugger or the monitor?
1016  */
1017 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1018                                 int *ret_prot, vaddr addr,
1019                                 target_ulong *fault_pte_addr,
1020                                 int access_type, int mmu_idx,
1021                                 bool first_stage, bool two_stage,
1022                                 bool is_debug, bool is_probe)
1023 {
1024     /*
1025      * NOTE: the env->pc value visible here will not be
1026      * correct, but the value visible to the exception handler
1027      * (riscv_cpu_do_interrupt) is correct
1028      */
1029     MemTxResult res;
1030     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1031     int mode = mmuidx_priv(mmu_idx);
1032     bool virt = mmuidx_2stage(mmu_idx);
1033     bool use_background = false;
1034     hwaddr ppn;
1035     int napot_bits = 0;
1036     target_ulong napot_mask;
1037     bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1038     bool sstack_page = false;
1039 
1040     if (do_svukte_check(env, first_stage, mode, virt) &&
1041         !check_svukte_addr(env, addr)) {
1042         return TRANSLATE_FAIL;
1043     }
1044 
1045     /*
1046      * Check if we should use the background registers for the two
1047      * stage translation. We don't need to check if we actually need
1048      * two stage translation as that happened before this function
1049      * was called. Background registers will be used if the guest has
1050      * forced a two stage translation to be on (in HS or M mode).
1051      */
1052     if (!env->virt_enabled && two_stage) {
1053         use_background = true;
1054     }
1055 
1056     if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1057         *physical = addr;
1058         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1059         return TRANSLATE_SUCCESS;
1060     }
1061 
1062     *ret_prot = 0;
1063 
1064     hwaddr base;
1065     int levels, ptidxbits, ptesize, vm, widened;
1066 
1067     if (first_stage == true) {
1068         if (use_background) {
1069             if (riscv_cpu_mxl(env) == MXL_RV32) {
1070                 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1071                 vm = get_field(env->vsatp, SATP32_MODE);
1072             } else {
1073                 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1074                 vm = get_field(env->vsatp, SATP64_MODE);
1075             }
1076         } else {
1077             if (riscv_cpu_mxl(env) == MXL_RV32) {
1078                 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1079                 vm = get_field(env->satp, SATP32_MODE);
1080             } else {
1081                 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1082                 vm = get_field(env->satp, SATP64_MODE);
1083             }
1084         }
1085         widened = 0;
1086     } else {
1087         if (riscv_cpu_mxl(env) == MXL_RV32) {
1088             base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1089             vm = get_field(env->hgatp, SATP32_MODE);
1090         } else {
1091             base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1092             vm = get_field(env->hgatp, SATP64_MODE);
1093         }
1094         widened = 2;
1095     }
1096 
1097     switch (vm) {
1098     case VM_1_10_SV32:
1099       levels = 2; ptidxbits = 10; ptesize = 4; break;
1100     case VM_1_10_SV39:
1101       levels = 3; ptidxbits = 9; ptesize = 8; break;
1102     case VM_1_10_SV48:
1103       levels = 4; ptidxbits = 9; ptesize = 8; break;
1104     case VM_1_10_SV57:
1105       levels = 5; ptidxbits = 9; ptesize = 8; break;
1106     case VM_1_10_MBARE:
1107         *physical = addr;
1108         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1109         return TRANSLATE_SUCCESS;
1110     default:
1111       g_assert_not_reached();
1112     }
1113 
1114     CPUState *cs = env_cpu(env);
1115     int va_bits = PGSHIFT + levels * ptidxbits + widened;
1116     int sxlen = 16 << riscv_cpu_sxl(env);
1117     int sxlen_bytes = sxlen / 8;
1118 
1119     if (first_stage == true) {
1120         target_ulong mask, masked_msbs;
1121 
1122         if (sxlen > (va_bits - 1)) {
1123             mask = (1L << (sxlen - (va_bits - 1))) - 1;
1124         } else {
1125             mask = 0;
1126         }
1127         masked_msbs = (addr >> (va_bits - 1)) & mask;
1128 
1129         if (masked_msbs != 0 && masked_msbs != mask) {
1130             return TRANSLATE_FAIL;
1131         }
1132     } else {
1133         if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1134             return TRANSLATE_FAIL;
1135         }
1136     }
1137 
1138     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1139     bool svade = riscv_cpu_cfg(env)->ext_svade;
1140     bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1141     bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1142 
1143     if (first_stage && two_stage && env->virt_enabled) {
1144         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1145         adue = adue && (env->henvcfg & HENVCFG_ADUE);
1146     }
1147 
1148     int ptshift = (levels - 1) * ptidxbits;
1149     target_ulong pte;
1150     hwaddr pte_addr;
1151     int i;
1152 
1153 #if !TCG_OVERSIZED_GUEST
1154 restart:
1155 #endif
1156     for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1157         target_ulong idx;
1158         if (i == 0) {
1159             idx = (addr >> (PGSHIFT + ptshift)) &
1160                            ((1 << (ptidxbits + widened)) - 1);
1161         } else {
1162             idx = (addr >> (PGSHIFT + ptshift)) &
1163                            ((1 << ptidxbits) - 1);
1164         }
1165 
1166         /* Check that the physical address of the PTE is legal. */
1167 
1168         if (two_stage && first_stage) {
1169             int vbase_prot;
1170             hwaddr vbase;
1171 
1172             /* Do the second stage translation on the base PTE address. */
1173             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1174                                                  base, NULL, MMU_DATA_LOAD,
1175                                                  MMUIdx_U, false, true,
1176                                                  is_debug, false);
1177 
1178             if (vbase_ret != TRANSLATE_SUCCESS) {
1179                 if (fault_pte_addr) {
1180                     *fault_pte_addr = (base + idx * ptesize) >> 2;
1181                 }
1182                 return TRANSLATE_G_STAGE_FAIL;
1183             }
1184 
1185             pte_addr = vbase + idx * ptesize;
1186         } else {
1187             pte_addr = base + idx * ptesize;
1188         }
1189 
1190         int pmp_prot;
1191         int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1192                                                sxlen_bytes,
1193                                                MMU_DATA_LOAD, PRV_S);
1194         if (pmp_ret != TRANSLATE_SUCCESS) {
1195             return TRANSLATE_PMP_FAIL;
1196         }
1197 
1198         if (riscv_cpu_mxl(env) == MXL_RV32) {
1199             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1200         } else {
1201             pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1202         }
1203 
1204         if (res != MEMTX_OK) {
1205             return TRANSLATE_FAIL;
1206         }
1207 
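        /*
         * Decode the PPN. For RV64, reject PTEs that use reserved bits,
         * PBMT bits while Svpbmt is not enabled, or the NAPOT bit without
         * the Svnapot extension.
         */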
1208         if (riscv_cpu_sxl(env) == MXL_RV32) {
1209             ppn = pte >> PTE_PPN_SHIFT;
1210         } else {
1211             if (pte & PTE_RESERVED) {
1212                 return TRANSLATE_FAIL;
1213             }
1214 
1215             if (!pbmte && (pte & PTE_PBMT)) {
1216                 return TRANSLATE_FAIL;
1217             }
1218 
1219             if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1220                 return TRANSLATE_FAIL;
1221             }
1222 
1223             ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1224         }
1225 
1226         if (!(pte & PTE_V)) {
1227             /* Invalid PTE */
1228             return TRANSLATE_FAIL;
1229         }
1230         if (pte & (PTE_R | PTE_W | PTE_X)) {
1231             goto leaf;
1232         }
1233 
1234         /* Inner PTE, continue walking */
1235         if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1236             return TRANSLATE_FAIL;
1237         }
1238         base = ppn << PGSHIFT;
1239     }
1240 
1241     /* No leaf pte at any translation level. */
1242     return TRANSLATE_FAIL;
1243 
1244  leaf:
1245     if (ppn & ((1ULL << ptshift) - 1)) {
1246         /* Misaligned PPN */
1247         return TRANSLATE_FAIL;
1248     }
1249     if (!pbmte && (pte & PTE_PBMT)) {
1250         /* Reserved without Svpbmt. */
1251         return TRANSLATE_FAIL;
1252     }
1253 
1254     target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1255     /* Check for reserved combinations of RWX flags. */
1256     switch (rwx) {
1257     case PTE_W | PTE_X:
1258         return TRANSLATE_FAIL;
1259     case PTE_W:
1260         /* With bcfi enabled, PTE_W alone is not reserved: it marks a shadow stack page. */
1261         if (cpu_get_bcfien(env) && first_stage) {
1262             sstack_page = true;
1263             /*
1264              * If this is a shadow stack access, read and write are allowed.
1265              * Otherwise, unless this is a probe, only read is allowed.
1266              */
1267             rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 :  PTE_R);
1268             break;
1269         }
1270         return TRANSLATE_FAIL;
1271     case PTE_R:
1272         /*
1273          * No matter what the `access_type` is, shadow stack accesses to
1274          * read-only memory are always store page faults. During unwind,
1275          * loads will be promoted to store faults.
1276          */
1277         if (is_sstack_idx) {
1278             return TRANSLATE_FAIL;
1279         }
1280         break;
1281     }
1282 
1283     int prot = 0;
1284     if (rwx & PTE_R) {
1285         prot |= PAGE_READ;
1286     }
1287     if (rwx & PTE_W) {
1288         prot |= PAGE_WRITE;
1289     }
1290     if (rwx & PTE_X) {
1291         bool mxr = false;
1292 
1293         /*
1294          * Use mstatus for first stage or for the second stage without
1295          * virt_enabled (MPRV+MPV)
1296          */
1297         if (first_stage || !env->virt_enabled) {
1298             mxr = get_field(env->mstatus, MSTATUS_MXR);
1299         }
1300 
1301         /* MPRV+MPV case, check VSSTATUS */
1302         if (first_stage && two_stage && !env->virt_enabled) {
1303             mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1304         }
1305 
1306         /*
1307          * Setting MXR at HS-level overrides both VS-stage and G-stage
1308          * execute-only permissions
1309          */
1310         if (env->virt_enabled) {
1311             mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1312         }
1313 
1314         if (mxr) {
1315             prot |= PAGE_READ;
1316         }
1317         prot |= PAGE_EXEC;
1318     }
1319 
1320     if (pte & PTE_U) {
1321         if (mode != PRV_U) {
1322             if (!mmuidx_sum(mmu_idx)) {
1323                 return TRANSLATE_FAIL;
1324             }
1325             /* SUM allows only read+write, not execute. */
1326             prot &= PAGE_READ | PAGE_WRITE;
1327         }
1328     } else if (mode != PRV_S) {
1329         /* Supervisor PTE flags when not S mode */
1330         return TRANSLATE_FAIL;
1331     }
1332 
1333     if (!((prot >> access_type) & 1)) {
1334         /*
1335          * Access check failed. Access check failures for shadow stack pages
1336          * are access faults.
1337          */
1338         return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1339     }
1340 
1341     target_ulong updated_pte = pte;
1342 
1343     /*
1344      * If ADUE is enabled, set accessed and dirty bits.
1345      * Otherwise raise an exception if necessary.
1346      */
1347     if (adue) {
1348         updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1349     } else if (!(pte & PTE_A) ||
1350                (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1351         return TRANSLATE_FAIL;
1352     }
1353 
1354     /* Page table updates need to be atomic with MTTCG enabled */
1355     if (updated_pte != pte && !is_debug) {
1356         if (!adue) {
1357             return TRANSLATE_FAIL;
1358         }
1359 
1360         /*
1361          * - if accessed or dirty bits need updating, and the PTE is
1362          *   in RAM, then we do so atomically with a compare and swap.
1363          * - if the PTE is in IO space or ROM, then it can't be updated
1364          *   and we return TRANSLATE_FAIL.
1365          * - if the PTE changed by the time we went to update it, then
1366          *   it is no longer valid and we must re-walk the page table.
1367          */
1368         MemoryRegion *mr;
1369         hwaddr l = sxlen_bytes, addr1;
1370         mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1371                                      false, MEMTXATTRS_UNSPECIFIED);
1372         if (memory_region_is_ram(mr)) {
1373             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1374 #if TCG_OVERSIZED_GUEST
1375             /*
1376              * MTTCG is not enabled on oversized TCG guests so
1377              * page table updates do not need to be atomic
1378              */
1379             *pte_pa = pte = updated_pte;
1380 #else
1381             target_ulong old_pte;
1382             if (riscv_cpu_sxl(env) == MXL_RV32) {
1383                 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
1384             } else {
1385                 old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
1386             }
1387             if (old_pte != pte) {
1388                 goto restart;
1389             }
1390             pte = updated_pte;
1391 #endif
1392         } else {
1393             /*
1394              * Misconfigured PTE in ROM (AD bits are not pre-set) or
1395              * PTE is in IO space and can't be updated atomically.
1396              */
1397             return TRANSLATE_FAIL;
1398         }
1399     }
1400 
1401     /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1402     target_ulong vpn = addr >> PGSHIFT;
1403 
1404     if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1405         napot_bits = ctzl(ppn) + 1;
1406         if ((i != (levels - 1)) || (napot_bits != 4)) {
1407             return TRANSLATE_FAIL;
1408         }
1409     }
1410 
1411     napot_mask = (1 << napot_bits) - 1;
1412     *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1413                   (vpn & (((target_ulong)1 << ptshift) - 1))
1414                  ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1415 
1416     /*
1417      * Remove write permission unless this is a store, or the page is
1418      * already dirty, so that we TLB miss on later writes to update
1419      * the dirty bit.
1420      */
1421     if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1422         prot &= ~PAGE_WRITE;
1423     }
1424     *ret_prot = prot;
1425 
1426     return TRANSLATE_SUCCESS;
1427 }
1428 
1429 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1430                                 MMUAccessType access_type, bool pmp_violation,
1431                                 bool first_stage, bool two_stage,
1432                                 bool two_stage_indirect)
1433 {
1434     CPUState *cs = env_cpu(env);
1435 
1436     switch (access_type) {
1437     case MMU_INST_FETCH:
1438         if (pmp_violation) {
1439             cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1440         } else if (env->virt_enabled && !first_stage) {
1441             cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1442         } else {
1443             cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1444         }
1445         break;
1446     case MMU_DATA_LOAD:
1447         if (pmp_violation) {
1448             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1449         } else if (two_stage && !first_stage) {
1450             cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1451         } else {
1452             cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1453         }
1454         break;
1455     case MMU_DATA_STORE:
1456         if (pmp_violation) {
1457             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1458         } else if (two_stage && !first_stage) {
1459             cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1460         } else {
1461             cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1462         }
1463         break;
1464     default:
1465         g_assert_not_reached();
1466     }
1467     env->badaddr = address;
1468     env->two_stage_lookup = two_stage;
1469     env->two_stage_indirect_lookup = two_stage_indirect;
1470 }
1471 
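/*
 * Debug/monitor translation: walk the page tables for @addr without
 * side effects, including the G-stage walk when V=1, and return the
 * physical page address, or -1 if the address does not translate.
 */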
1472 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1473 {
1474     RISCVCPU *cpu = RISCV_CPU(cs);
1475     CPURISCVState *env = &cpu->env;
1476     hwaddr phys_addr;
1477     int prot;
1478     int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1479 
1480     if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1481                              true, env->virt_enabled, true, false)) {
1482         return -1;
1483     }
1484 
1485     if (env->virt_enabled) {
1486         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1487                                  0, MMUIdx_U, false, true, true, false)) {
1488             return -1;
1489         }
1490     }
1491 
1492     return phys_addr & TARGET_PAGE_MASK;
1493 }
1494 
1495 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1496                                      vaddr addr, unsigned size,
1497                                      MMUAccessType access_type,
1498                                      int mmu_idx, MemTxAttrs attrs,
1499                                      MemTxResult response, uintptr_t retaddr)
1500 {
1501     RISCVCPU *cpu = RISCV_CPU(cs);
1502     CPURISCVState *env = &cpu->env;
1503 
1504     if (access_type == MMU_DATA_STORE) {
1505         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1506     } else if (access_type == MMU_DATA_LOAD) {
1507         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1508     } else {
1509         cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1510     }
1511 
1512     env->badaddr = addr;
1513     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1514     env->two_stage_indirect_lookup = false;
1515     cpu_loop_exit_restore(cs, retaddr);
1516 }
1517 
1518 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1519                                    MMUAccessType access_type, int mmu_idx,
1520                                    uintptr_t retaddr)
1521 {
1522     RISCVCPU *cpu = RISCV_CPU(cs);
1523     CPURISCVState *env = &cpu->env;
1524     switch (access_type) {
1525     case MMU_INST_FETCH:
1526         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1527         break;
1528     case MMU_DATA_LOAD:
1529         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1530         /* shadow stack mis aligned accesses are access faults */
1531         if (mmu_idx & MMU_IDX_SS_WRITE) {
1532             cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1533         }
1534         break;
1535     case MMU_DATA_STORE:
1536         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1537         /* shadow stack mis aligned accesses are access faults */
1538         if (mmu_idx & MMU_IDX_SS_WRITE) {
1539             cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1540         }
1541         break;
1542     default:
1543         g_assert_not_reached();
1544     }
1545     env->badaddr = addr;
1546     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1547     env->two_stage_indirect_lookup = false;
1548     cpu_loop_exit_restore(cs, retaddr);
1549 }
1550 
1551 
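/*
 * Count a TLB miss in the PMU: map the MMU access type to the matching
 * ITLB/DTLB miss event and increment the corresponding counter.
 */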
1552 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1553 {
1554     enum riscv_pmu_event_idx pmu_event_type;
1555 
1556     switch (access_type) {
1557     case MMU_INST_FETCH:
1558         pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1559         break;
1560     case MMU_DATA_LOAD:
1561         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1562         break;
1563     case MMU_DATA_STORE:
1564         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1565         break;
1566     default:
1567         return;
1568     }
1569 
1570     riscv_pmu_incr_ctr(cpu, pmu_event_type);
1571 }
1572 
1573 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1574                         MMUAccessType access_type, int mmu_idx,
1575                         bool probe, uintptr_t retaddr)
1576 {
1577     RISCVCPU *cpu = RISCV_CPU(cs);
1578     CPURISCVState *env = &cpu->env;
1579     vaddr im_address;
1580     hwaddr pa = 0;
1581     int prot, prot2, prot_pmp;
1582     bool pmp_violation = false;
1583     bool first_stage_error = true;
1584     bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1585     bool two_stage_indirect_error = false;
1586     int ret = TRANSLATE_FAIL;
1587     int mode = mmuidx_priv(mmu_idx);
1588     /* default TLB page size */
1589     hwaddr tlb_size = TARGET_PAGE_SIZE;
1590 
1591     env->guest_phys_fault_addr = 0;
1592 
1593     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1594                   __func__, address, access_type, mmu_idx);
1595 
1596     pmu_tlb_fill_incr_ctr(cpu, access_type);
1597     if (two_stage_lookup) {
1598         /* Two stage lookup */
1599         ret = get_physical_address(env, &pa, &prot, address,
1600                                    &env->guest_phys_fault_addr, access_type,
1601                                    mmu_idx, true, true, false, probe);
1602 
1603         /*
1604          * A G-stage exception may be triggered during the two stage lookup,
1605          * and env->guest_phys_fault_addr has already been set in
1606          * get_physical_address().
1607          */
1608         if (ret == TRANSLATE_G_STAGE_FAIL) {
1609             first_stage_error = false;
1610             two_stage_indirect_error = true;
1611         }
1612 
1613         qemu_log_mask(CPU_LOG_MMU,
1614                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1615                       HWADDR_FMT_plx " prot %d\n",
1616                       __func__, address, ret, pa, prot);
1617 
1618         if (ret == TRANSLATE_SUCCESS) {
1619             /* Second stage lookup */
1620             im_address = pa;
1621 
1622             ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1623                                        access_type, MMUIdx_U, false, true,
1624                                        false, probe);
1625 
1626             qemu_log_mask(CPU_LOG_MMU,
1627                           "%s 2nd-stage address=%" VADDR_PRIx
1628                           " ret %d physical "
1629                           HWADDR_FMT_plx " prot %d\n",
1630                           __func__, im_address, ret, pa, prot2);
1631 
1632             prot &= prot2;
1633 
1634             if (ret == TRANSLATE_SUCCESS) {
1635                 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1636                                                size, access_type, mode);
1637                 tlb_size = pmp_get_tlb_size(env, pa);
1638 
1639                 qemu_log_mask(CPU_LOG_MMU,
1640                               "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1641                               " %d tlb_size %" HWADDR_PRIu "\n",
1642                               __func__, pa, ret, prot_pmp, tlb_size);
1643 
1644                 prot &= prot_pmp;
1645             } else {
1646                 /*
1647                  * Guest physical address translation failed, this is a HS
1648                  * level exception
1649                  */
1650                 first_stage_error = false;
1651                 if (ret != TRANSLATE_PMP_FAIL) {
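                    /*
                     * Per the privileged spec, htval/mtval2 report the
                     * faulting guest physical address shifted right by 2.
                     */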
1652                     env->guest_phys_fault_addr = (im_address |
1653                                                   (address &
1654                                                    (TARGET_PAGE_SIZE - 1))) >> 2;
1655                 }
1656             }
1657         }
1658     } else {
1659         /* Single stage lookup */
1660         ret = get_physical_address(env, &pa, &prot, address, NULL,
1661                                    access_type, mmu_idx, true, false, false,
1662                                    probe);
1663 
1664         qemu_log_mask(CPU_LOG_MMU,
1665                       "%s address=%" VADDR_PRIx " ret %d physical "
1666                       HWADDR_FMT_plx " prot %d\n",
1667                       __func__, address, ret, pa, prot);
1668 
1669         if (ret == TRANSLATE_SUCCESS) {
1670             ret = get_physical_address_pmp(env, &prot_pmp, pa,
1671                                            size, access_type, mode);
1672             tlb_size = pmp_get_tlb_size(env, pa);
1673 
1674             qemu_log_mask(CPU_LOG_MMU,
1675                           "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1676                           " %d tlb_size %" HWADDR_PRIu "\n",
1677                           __func__, pa, ret, prot_pmp, tlb_size);
1678 
1679             prot &= prot_pmp;
1680         }
1681     }
1682 
1683     if (ret == TRANSLATE_PMP_FAIL) {
1684         pmp_violation = true;
1685     }
1686 
1687     if (ret == TRANSLATE_SUCCESS) {
1688         tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1689                      prot, mmu_idx, tlb_size);
1690         return true;
1691     } else if (probe) {
1692         return false;
1693     } else {
1694         raise_mmu_exception(env, address, access_type, pmp_violation,
1695                             first_stage_error, two_stage_lookup,
1696                             two_stage_indirect_error);
1697         cpu_loop_exit_restore(cs, retaddr);
1698     }
1699 
1700     return true;
1701 }
1702 
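/*
 * Construct the transformed instruction written to htinst/mtinst for a
 * trapped load/store (hypervisor extension). Compressed accesses are
 * expanded to their 32-bit encodings with bit 1 cleared, the immediate
 * offset is zeroed, and rs1 is replaced by the faulting address offset.
 */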
1703 static target_ulong riscv_transformed_insn(CPURISCVState *env,
1704                                            target_ulong insn,
1705                                            target_ulong taddr)
1706 {
1707     target_ulong xinsn = 0;
1708     target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
1709 
1710     /*
1711      * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need
1712      * to be uncompressed. Quadrant 1 of the RVC instruction space need
1713      * not be transformed because those instructions cannot generate any
1714      * load/store trap.
1715      */
1716 
1717     if ((insn & 0x3) != 0x3) {
1718         /* Transform 16bit instruction into 32bit instruction */
1719         switch (GET_C_OP(insn)) {
1720         case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
1721             switch (GET_C_FUNC(insn)) {
1722             case OPC_RISC_C_FUNC_FLD_LQ:
1723                 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
1724                     xinsn = OPC_RISC_FLD;
1725                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1726                     access_rs1 = GET_C_RS1S(insn);
1727                     access_imm = GET_C_LD_IMM(insn);
1728                     access_size = 8;
1729                 }
1730                 break;
1731             case OPC_RISC_C_FUNC_LW: /* C.LW */
1732                 xinsn = OPC_RISC_LW;
1733                 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1734                 access_rs1 = GET_C_RS1S(insn);
1735                 access_imm = GET_C_LW_IMM(insn);
1736                 access_size = 4;
1737                 break;
1738             case OPC_RISC_C_FUNC_FLW_LD:
1739                 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
1740                     xinsn = OPC_RISC_FLW;
1741                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1742                     access_rs1 = GET_C_RS1S(insn);
1743                     access_imm = GET_C_LW_IMM(insn);
1744                     access_size = 4;
1745                 } else { /* C.LD (RV64/RV128) */
1746                     xinsn = OPC_RISC_LD;
1747                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1748                     access_rs1 = GET_C_RS1S(insn);
1749                     access_imm = GET_C_LD_IMM(insn);
1750                     access_size = 8;
1751                 }
1752                 break;
1753             case OPC_RISC_C_FUNC_FSD_SQ:
1754                 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
1755                     xinsn = OPC_RISC_FSD;
1756                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1757                     access_rs1 = GET_C_RS1S(insn);
1758                     access_imm = GET_C_SD_IMM(insn);
1759                     access_size = 8;
1760                 }
1761                 break;
1762             case OPC_RISC_C_FUNC_SW: /* C.SW */
1763                 xinsn = OPC_RISC_SW;
1764                 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1765                 access_rs1 = GET_C_RS1S(insn);
1766                 access_imm = GET_C_SW_IMM(insn);
1767                 access_size = 4;
1768                 break;
1769             case OPC_RISC_C_FUNC_FSW_SD:
1770                 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
1771                     xinsn = OPC_RISC_FSW;
1772                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1773                     access_rs1 = GET_C_RS1S(insn);
1774                     access_imm = GET_C_SW_IMM(insn);
1775                     access_size = 4;
1776                 } else { /* C.SD (RV64/RV128) */
1777                     xinsn = OPC_RISC_SD;
1778                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1779                     access_rs1 = GET_C_RS1S(insn);
1780                     access_imm = GET_C_SD_IMM(insn);
1781                     access_size = 8;
1782                 }
1783                 break;
1784             default:
1785                 break;
1786             }
1787             break;
1788         case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
1789             switch (GET_C_FUNC(insn)) {
1790             case OPC_RISC_C_FUNC_FLDSP_LQSP:
1791                 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
1792                     xinsn = OPC_RISC_FLD;
1793                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1794                     access_rs1 = 2;
1795                     access_imm = GET_C_LDSP_IMM(insn);
1796                     access_size = 8;
1797                 }
1798                 break;
1799             case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
1800                 xinsn = OPC_RISC_LW;
1801                 xinsn = SET_RD(xinsn, GET_C_RD(insn));
1802                 access_rs1 = 2;
1803                 access_imm = GET_C_LWSP_IMM(insn);
1804                 access_size = 4;
1805                 break;
1806             case OPC_RISC_C_FUNC_FLWSP_LDSP:
1807                 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
1808                     xinsn = OPC_RISC_FLW;
1809                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1810                     access_rs1 = 2;
1811                     access_imm = GET_C_LWSP_IMM(insn);
1812                     access_size = 4;
1813                 } else { /* C.LDSP (RV64/RV128) */
1814                     xinsn = OPC_RISC_LD;
1815                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1816                     access_rs1 = 2;
1817                     access_imm = GET_C_LDSP_IMM(insn);
1818                     access_size = 8;
1819                 }
1820                 break;
1821             case OPC_RISC_C_FUNC_FSDSP_SQSP:
1822                 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
1823                     xinsn = OPC_RISC_FSD;
1824                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1825                     access_rs1 = 2;
1826                     access_imm = GET_C_SDSP_IMM(insn);
1827                     access_size = 8;
1828                 }
1829                 break;
1830             case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
1831                 xinsn = OPC_RISC_SW;
1832                 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1833                 access_rs1 = 2;
1834                 access_imm = GET_C_SWSP_IMM(insn);
1835                 access_size = 4;
1836                 break;
1837             case 7: /* funct3 == 7: C.FSWSP / C.SDSP */
1838                 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
1839                     xinsn = OPC_RISC_FSW;
1840                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1841                     access_rs1 = 2;
1842                     access_imm = GET_C_SWSP_IMM(insn);
1843                     access_size = 4;
1844                 } else { /* C.SDSP (RV64/RV128) */
1845                     xinsn = OPC_RISC_SD;
1846                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1847                     access_rs1 = 2;
1848                     access_imm = GET_C_SDSP_IMM(insn);
1849                     access_size = 8;
1850                 }
1851                 break;
1852             default:
1853                 break;
1854             }
1855             break;
1856         default:
1857             break;
1858         }
1859 
1860         /*
1861          * Clear bit 1 of the transformed instruction to indicate that
1862          * the original instruction was a 16-bit instruction.
1863          */
1864         xinsn &= ~((target_ulong)0x2);
1865     } else {
1866         /* Transform 32bit (or wider) instructions */
1867         switch (MASK_OP_MAJOR(insn)) {
1868         case OPC_RISC_ATOMIC:
1869             xinsn = insn;
1870             access_rs1 = GET_RS1(insn);
1871             access_size = 1 << GET_FUNCT3(insn);
1872             break;
1873         case OPC_RISC_LOAD:
1874         case OPC_RISC_FP_LOAD:
1875             xinsn = SET_I_IMM(insn, 0);
1876             access_rs1 = GET_RS1(insn);
1877             access_imm = GET_IMM(insn);
1878             access_size = 1 << GET_FUNCT3(insn);
1879             break;
1880         case OPC_RISC_STORE:
1881         case OPC_RISC_FP_STORE:
1882             xinsn = SET_S_IMM(insn, 0);
1883             access_rs1 = GET_RS1(insn);
1884             access_imm = GET_STORE_IMM(insn);
1885             access_size = 1 << GET_FUNCT3(insn);
1886             break;
1887         case OPC_RISC_SYSTEM:
1888             if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
1889                 xinsn = insn;
1890                 access_rs1 = GET_RS1(insn);
1891                 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
1893             }
1894             break;
1895         default:
1896             break;
1897         }
1898     }
1899 
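    /*
     * Fill the "Addr. Offset" field: rs1 is replaced by the difference
     * between the faulting address and the computed effective address,
     * truncated to the access size; it is non-zero only for misaligned
     * accesses.
     */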
1900     if (access_size) {
1901         xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
1902                                (access_size - 1));
1903     }
1904 
1905     return xinsn;
1906 }
1907 
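/*
 * Promote a load fault cause to its store/AMO counterpart; used when
 * RISCV_UW2_ALWAYS_STORE_AMO requires the faulting access to be reported
 * as a store/AMO fault.
 */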
1908 static target_ulong promote_load_fault(target_ulong orig_cause)
1909 {
1910     switch (orig_cause) {
1911     case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1912         return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1913 
1914     case RISCV_EXCP_LOAD_ACCESS_FAULT:
1915         return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1916 
1917     case RISCV_EXCP_LOAD_PAGE_FAULT:
1918         return RISCV_EXCP_STORE_PAGE_FAULT;
1919     }
1920 
1921     /* if no promotion, return original cause */
1922     return orig_cause;
1923 }
1924 
1925 /*
1926  * Handle Traps
1927  *
1928  * Adapted from Spike's processor_t::take_trap.
1929  */
1930 void riscv_cpu_do_interrupt(CPUState *cs)
1931 {
1932     RISCVCPU *cpu = RISCV_CPU(cs);
1933     CPURISCVState *env = &cpu->env;
1934     bool virt = env->virt_enabled;
1935     bool write_gva = false;
1936     bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
1937     uint64_t s;
1938     int mode;
1939 
1940     /*
1941      * cs->exception_index is 32 bits wide, unlike mcause which is XLEN
1942      * bits wide, so we mask off the MSB and separate into trap type and cause.
1943      */
1944     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
1945     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
1946     uint64_t deleg = async ? env->mideleg : env->medeleg;
1947     bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
1948         !(env->mip & (1ULL << cause));
1949     bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
1950         !(env->mip & (1ULL << cause));
1951     target_ulong tval = 0;
1952     target_ulong tinst = 0;
1953     target_ulong htval = 0;
1954     target_ulong mtval2 = 0;
1955     int sxlen = 0;
1956     int mxlen = 16 << riscv_cpu_mxl(env);
1957     bool nnmi_excep = false;
1958 
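    /*
     * Resumable NMI (Smrnmi): a pending RNMI interrupt always traps to
     * M-mode, saving state in mnepc/mncause/mnstatus and jumping to the
     * RNMI interrupt vector.
     */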
1959     if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
1960         env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
1961         env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV,
1962                                   env->virt_enabled);
1963         env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP,
1964                                   env->priv);
1965         env->mncause = cause | ((target_ulong)1U << (mxlen - 1));
1966         env->mnepc = env->pc;
1967         env->pc = env->rnmi_irqvec;
1968 
1969         if (cpu_get_fcfien(env)) {
1970             env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
1971         }
1972 
1973         /* Trapping to M mode, virt is disabled */
1974         riscv_cpu_set_mode(env, PRV_M, false);
1975 
1976         return;
1977     }
1978 
1979     if (!async) {
1980         /* set tval to badaddr for traps with address information */
1981         switch (cause) {
1982 #ifdef CONFIG_TCG
1983         case RISCV_EXCP_SEMIHOST:
1984             do_common_semihosting(cs);
1985             env->pc += 4;
1986             return;
1987 #endif
1988         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1989         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
1990         case RISCV_EXCP_LOAD_ADDR_MIS:
1991         case RISCV_EXCP_STORE_AMO_ADDR_MIS:
1992         case RISCV_EXCP_LOAD_ACCESS_FAULT:
1993         case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
1994         case RISCV_EXCP_LOAD_PAGE_FAULT:
1995         case RISCV_EXCP_STORE_PAGE_FAULT:
1996             if (always_storeamo) {
1997                 cause = promote_load_fault(cause);
1998             }
1999             write_gva = env->two_stage_lookup;
2000             tval = env->badaddr;
2001             if (env->two_stage_indirect_lookup) {
2002                 /*
2003                  * special pseudoinstruction for G-stage fault taken while
2004                  * doing VS-stage page table walk.
2005                  */
2006                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2007             } else {
2008                 /*
2009                  * The "Addr. Offset" field in transformed instruction is
2010                  * non-zero only for misaligned access.
2011                  */
2012                 tinst = riscv_transformed_insn(env, env->bins, tval);
2013             }
2014             break;
2015         case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
2016         case RISCV_EXCP_INST_ADDR_MIS:
2017         case RISCV_EXCP_INST_ACCESS_FAULT:
2018         case RISCV_EXCP_INST_PAGE_FAULT:
2019             write_gva = env->two_stage_lookup;
2020             tval = env->badaddr;
2021             if (env->two_stage_indirect_lookup) {
2022                 /*
2023                  * special pseudoinstruction for G-stage fault taken while
2024                  * doing VS-stage page table walk.
2025                  */
2026                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2027             }
2028             break;
2029         case RISCV_EXCP_ILLEGAL_INST:
2030         case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
2031             tval = env->bins;
2032             break;
2033         case RISCV_EXCP_BREAKPOINT:
2034             tval = env->badaddr;
2035             if (cs->watchpoint_hit) {
2036                 tval = cs->watchpoint_hit->hitaddr;
2037                 cs->watchpoint_hit = NULL;
2038             }
2039             break;
2040         case RISCV_EXCP_SW_CHECK:
2041             tval = env->sw_check_code;
2042             break;
2043         default:
2044             break;
2045         }
2046         /* ecall is dispatched as one cause so translate based on mode */
2047         if (cause == RISCV_EXCP_U_ECALL) {
2048             assert(env->priv <= 3);
2049 
2050             if (env->priv == PRV_M) {
2051                 cause = RISCV_EXCP_M_ECALL;
2052             } else if (env->priv == PRV_S && env->virt_enabled) {
2053                 cause = RISCV_EXCP_VS_ECALL;
2054             } else if (env->priv == PRV_S && !env->virt_enabled) {
2055                 cause = RISCV_EXCP_S_ECALL;
2056             } else if (env->priv == PRV_U) {
2057                 cause = RISCV_EXCP_U_ECALL;
2058             }
2059         }
2060     }
2061 
2062     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
2063                      riscv_cpu_get_trap_name(cause, async));
2064 
2065     qemu_log_mask(CPU_LOG_INT,
2066                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
2067                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
2068                   __func__, env->mhartid, async, cause, env->pc, tval,
2069                   riscv_cpu_get_trap_name(cause, async));
2070 
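    /*
     * Traps taken from U- or S-mode are handled in S-mode when the cause
     * is delegated via medeleg/mideleg (or was injected through
     * mvien/hvien); otherwise they are handled in M-mode.
     */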
2071     mode = env->priv <= PRV_S && cause < 64 &&
2072         (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
2073 
2074     if (mode == PRV_S) {
2075         /* handle the trap in S-mode */
2076         /* save elp status */
2077         if (cpu_get_fcfien(env)) {
2078             env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
2079         }
2080 
2081         if (riscv_has_ext(env, RVH)) {
2082             uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
2083 
2084             if (env->virt_enabled &&
2085                 (((hdeleg >> cause) & 1) || vs_injected)) {
2086                 /* Trap to VS mode */
2087                 /*
2088                  * See if we need to adjust cause. Yes if its VS mode interrupt
2089                  * no if hypervisor has delegated one of hs mode's interrupt
2090                  */
2091                 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
2092                               cause == IRQ_VS_EXT)) {
2093                     cause = cause - 1;
2094                 }
2095                 write_gva = false;
2096             } else if (env->virt_enabled) {
2097                 /* Trap into HS mode, from virt */
2098                 riscv_cpu_swap_hypervisor_regs(env);
2099                 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
2100                                          env->priv);
2101                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
2102 
2103                 htval = env->guest_phys_fault_addr;
2104 
2105                 virt = false;
2106             } else {
2107                 /* Trap into HS mode */
2108                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
2109                 htval = env->guest_phys_fault_addr;
2110             }
2111             env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
2112         }
2113 
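        /*
         * Update the S-mode view of mstatus: save SIE into SPIE, record
         * the previous privilege in SPP, and disable S-mode interrupts.
         */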
2114         s = env->mstatus;
2115         s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
2116         s = set_field(s, MSTATUS_SPP, env->priv);
2117         s = set_field(s, MSTATUS_SIE, 0);
2118         env->mstatus = s;
2119         sxlen = 16 << riscv_cpu_sxl(env);
2120         env->scause = cause | ((target_ulong)async << (sxlen - 1));
2121         env->sepc = env->pc;
2122         env->stval = tval;
2123         env->htval = htval;
2124         env->htinst = tinst;
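        /* Vectored mode (stvec.MODE == 1): async traps jump to BASE + 4 * cause */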
2125         env->pc = (env->stvec >> 2 << 2) +
2126                   ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
2127         riscv_cpu_set_mode(env, PRV_S, virt);
2128     } else {
2129         /*
2130          * If the hart encounters an exception while executing in M-mode
2131          * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
2132          */
2133         nnmi_excep = cpu->cfg.ext_smrnmi &&
2134                      !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2135                      !async;
2136 
2137         /* handle the trap in M-mode */
2138         /* save elp status */
2139         if (cpu_get_fcfien(env)) {
2140             if (nnmi_excep) {
2141                 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
2142                                           env->elp);
2143             } else {
2144                 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
2145             }
2146         }
2147 
2148         if (riscv_has_ext(env, RVH)) {
2149             if (env->virt_enabled) {
2150                 riscv_cpu_swap_hypervisor_regs(env);
2151             }
2152             env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
2153                                      env->virt_enabled);
2154             if (env->virt_enabled && tval) {
2155                 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
2156             }
2157 
2158             mtval2 = env->guest_phys_fault_addr;
2159 
2160             /* Trapping to M mode, virt is disabled */
2161             virt = false;
2162         }
2163 
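        /*
         * Save MIE into MPIE, record the previous privilege in MPP, and
         * disable M-mode interrupts.
         */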
2164         s = env->mstatus;
2165         s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
2166         s = set_field(s, MSTATUS_MPP, env->priv);
2167         s = set_field(s, MSTATUS_MIE, 0);
2168         env->mstatus = s;
2169         env->mcause = cause | ((target_ulong)async << (mxlen - 1));
2170         env->mepc = env->pc;
2171         env->mtval = tval;
2172         env->mtval2 = mtval2;
2173         env->mtinst = tinst;
2174 
2175         /*
2176          * For RNMI exception, program counter is set to the RNMI exception
2177          * trap handler address.
2178          */
2179         if (nnmi_excep) {
2180             env->pc = env->rnmi_excpvec;
2181         } else {
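            /* Vectored mode (mtvec.MODE == 1): async traps jump to BASE + 4 * cause */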
2182             env->pc = (env->mtvec >> 2 << 2) +
2183                       ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
2184         }
2185         riscv_cpu_set_mode(env, PRV_M, virt);
2186     }
2187 
2188     /*
2189      * Interrupt/exception/trap delivery is an asynchronous event and, as
2190      * per the Zicfilp spec, the CPU should clear the ELP state. There is
2191      * no harm in clearing it unconditionally.
2192      */
2193     env->elp = false;
2194 
2195     /*
2196      * NOTE: it is not necessary to yield load reservations here. It is only
2197      * necessary for an SC from "another hart" to cause a load reservation
2198      * to be yielded. Refer to the memory consistency model section of the
2199      * RISC-V ISA Specification.
2200      */
2201 
2202     env->two_stage_lookup = false;
2203     env->two_stage_indirect_lookup = false;
2204 }
2205 
2206 #endif /* !CONFIG_USER_ONLY */
2207