xref: /qemu/target/riscv/cpu.h (revision cc1f4b34d011e908dcaf24721f1d5808e02ab0bd)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-common.h"
27 #include "exec/cpu-defs.h"
28 #include "exec/cpu-interrupt.h"
29 #include "exec/gdbstub.h"
30 #include "qemu/cpu-float.h"
31 #include "qom/object.h"
32 #include "qemu/int128.h"
33 #include "cpu_bits.h"
34 #include "cpu_cfg.h"
35 #include "qapi/qapi-types-common.h"
36 #include "cpu-qom.h"
37 
38 typedef struct CPUArchState CPURISCVState;
39 
40 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
41 
42 #if defined(TARGET_RISCV32)
43 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
44 #elif defined(TARGET_RISCV64)
45 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
46 #endif
47 
/*
 * b0: Whether an instruction always raises a store AMO or not.
 */
51 #define RISCV_UW2_ALWAYS_STORE_AMO 1
52 
53 #define RV(x) ((target_ulong)1 << (x - 'A'))
54 
55 /*
56  * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
57  * when adding new MISA bits here.
58  */
59 #define RVI RV('I')
60 #define RVE RV('E') /* E and I are mutually exclusive */
61 #define RVM RV('M')
62 #define RVA RV('A')
63 #define RVF RV('F')
64 #define RVD RV('D')
65 #define RVV RV('V')
66 #define RVC RV('C')
67 #define RVS RV('S')
68 #define RVU RV('U')
69 #define RVH RV('H')
70 #define RVG RV('G')
71 #define RVB RV('B')
72 
73 extern const uint32_t misa_bits[];
74 const char *riscv_get_misa_ext_name(uint32_t bit);
75 const char *riscv_get_misa_ext_description(uint32_t bit);
76 
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
78 
/*
 * Describes a CPU profile: its name, the MISA bits it mandates, and
 * optional parent profiles for U-mode and S-mode.
 */
typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent; /* parent U-mode profile, if any */
    struct riscv_cpu_profile *s_parent; /* parent S-mode profile, if any */
    const char *name;                   /* user-visible profile name */
    uint32_t misa_ext;                  /* MISA bits required by the profile */
    bool enabled;
    bool user_set;                      /* true when set explicitly by the user */
    int priv_spec;    /* required priv spec, or RISCV_PROFILE_ATTR_UNUSED */
    int satp_mode;    /* required satp mode, or RISCV_PROFILE_ATTR_UNUSED */
    /* cfg offsets; presumably RISCV_PROFILE_EXT_LIST_END terminated -- verify */
    const int32_t ext_offsets[];
} RISCVCPUProfile;
90 
91 #define RISCV_PROFILE_EXT_LIST_END -1
92 #define RISCV_PROFILE_ATTR_UNUSED -1
93 
94 extern RISCVCPUProfile *riscv_profiles[];
95 
96 /* Privileged specification version */
97 #define PRIV_VER_1_10_0_STR "v1.10.0"
98 #define PRIV_VER_1_11_0_STR "v1.11.0"
99 #define PRIV_VER_1_12_0_STR "v1.12.0"
100 #define PRIV_VER_1_13_0_STR "v1.13.0"
101 enum {
102     PRIV_VERSION_1_10_0 = 0,
103     PRIV_VERSION_1_11_0,
104     PRIV_VERSION_1_12_0,
105     PRIV_VERSION_1_13_0,
106 
107     PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
108 };
109 
110 #define VEXT_VERSION_1_00_0 0x00010000
111 #define VEXT_VER_1_00_0_STR "v1.0"
112 
113 enum {
114     TRANSLATE_SUCCESS,
115     TRANSLATE_FAIL,
116     TRANSLATE_PMP_FAIL,
117     TRANSLATE_G_STAGE_FAIL
118 };
119 
/* Extension context status (disabled / initial / clean / dirty) */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0, /* pointer masking disabled */
    PMM_FIELD_RESERVED = 1, /* reserved encoding */
    PMM_FIELD_PMLEN7   = 2, /* PMLEN = 7 */
    PMM_FIELD_PMLEN16  = 3, /* PMLEN = 16 */
} RISCVPmPmm;
135 
/*
 * Rule describing the extensions implied by one MISA bit or one
 * multi-letter extension.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    /* presumably terminated with RISCV_IMPLIED_EXTS_RULE_END -- verify */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
153 
154 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
155 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
156 
157 #define RISCV_IMPLIED_EXTS_RULE_END -1
158 
159 #define MMU_USER_IDX 3
160 
161 #define MAX_RISCV_PMPS (16)
162 
163 #if !defined(CONFIG_USER_ONLY)
164 #include "pmp.h"
165 #include "debug.h"
166 #endif
167 
168 #define RV_VLEN_MAX 1024
169 #define RV_MAX_MHPMEVENTS 32
170 #define RV_MAX_MHPMCOUNTERS 32
171 
172 FIELD(VTYPE, VLMUL, 0, 3)
173 FIELD(VTYPE, VSEW, 3, 3)
174 FIELD(VTYPE, VTA, 6, 1)
175 FIELD(VTYPE, VMA, 7, 1)
176 FIELD(VTYPE, VEDIV, 8, 2)
177 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
178 
/* State of one programmable PMU counter */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 (upper half) */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 (upper half) */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
191 
/* State of the fixed (cycle/instret) PMU counters */
typedef struct PMUFixedCtrState {
        /* Track cycle and icount for each privilege mode */
        uint64_t counter[4];
        uint64_t counter_prev[4];
        /* Track cycle and icount for each privilege mode when V = 1 */
        uint64_t counter_virt[2];
        uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
200 
/* Per-hart architectural and emulation state (aka CPURISCVState). */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool      elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env place holder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Control Transfer Records (CTR) CSRs */
    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};
500 
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    /* Dynamically built gdbstub register descriptions */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};
525 
/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 * @misa_mxl_max: The widest MXL this CPU model supports.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    RISCVMXL misa_mxl_max;  /* max mxl for this cpu */
};
540 
541 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
542 {
543     return (env->misa_ext & ext) != 0;
544 }
545 
546 #include "cpu_user.h"
547 
548 extern const char * const riscv_int_regnames[];
549 extern const char * const riscv_int_regnamesh[];
550 extern const char * const riscv_fpr_regnames[];
551 
552 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
553 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
554                                int cpuid, DumpState *s);
555 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
556                                int cpuid, DumpState *s);
557 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
558 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
559 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
560 uint8_t riscv_cpu_default_priority(int irq);
561 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
562 int riscv_cpu_mirq_pending(CPURISCVState *env);
563 int riscv_cpu_sirq_pending(CPURISCVState *env);
564 int riscv_cpu_vsirq_pending(CPURISCVState *env);
565 bool riscv_cpu_fp_enabled(CPURISCVState *env);
566 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
567 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
568 bool riscv_cpu_vector_enabled(CPURISCVState *env);
569 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
570 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
571 bool cpu_get_fcfien(CPURISCVState *env);
572 bool cpu_get_bcfien(CPURISCVState *env);
573 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
574 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
575                                                MMUAccessType access_type,
576                                                int mmu_idx, uintptr_t retaddr);
577 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
578                         MMUAccessType access_type, int mmu_idx,
579                         bool probe, uintptr_t retaddr);
580 char *riscv_isa_string(RISCVCPU *cpu);
581 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
582 bool riscv_cpu_option_set(const char *optname);
583 
584 #ifndef CONFIG_USER_ONLY
585 void riscv_cpu_do_interrupt(CPUState *cpu);
586 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
587 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
588                                      vaddr addr, unsigned size,
589                                      MMUAccessType access_type,
590                                      int mmu_idx, MemTxAttrs attrs,
591                                      MemTxResult response, uintptr_t retaddr);
592 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
593 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
594 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
595 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
596 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
597                               uint64_t value);
598 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
599 void riscv_cpu_interrupt(CPURISCVState *env);
600 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
601 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
602                              void *arg);
603 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
604                                    int (*rmw_fn)(void *arg,
605                                                  target_ulong reg,
606                                                  target_ulong *val,
607                                                  target_ulong new_val,
608                                                  target_ulong write_mask),
609                                    void *rmw_fn_arg);
610 
611 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
612 #endif /* !CONFIG_USER_ONLY */
613 
614 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
615 
616 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
617     enum CTRType type, target_ulong prev_priv, bool prev_virt);
618 void riscv_ctr_clear(CPURISCVState *env);
619 
620 void riscv_translate_init(void);
621 void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
622                           int *max_insns, vaddr pc, void *host_pc);
623 
624 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
625                                       RISCVException exception,
626                                       uintptr_t pc);
627 
628 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
629 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
630 
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
/*
 * NOTE(review): PM_MASK_ENABLED/PM_BASE_ENABLED occupy bits 18-19 and
 * collide with VTA/VMA below; pointer masking is also covered by
 * PM_PMM/PM_SIGNEXTEND at bits 29-31.  These two fields look like stale
 * leftovers of an older pointer-masking scheme -- confirm they are
 * unused and remove them.
 */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
661 
/*
 * Current MXL (machine XLEN) of the hart.  A 32-bit-only build
 * constant-folds this to MXL_RV32; otherwise it is read from misa_mxl
 * (stored as uint32_t in env for migration).
 */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
/* Width in bits implied by MXL: 1UL << (4 + mxl) */
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
671 
672 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
673 {
674     return &env_archcpu(env)->cfg;
675 }
676 
677 #if !defined(CONFIG_USER_ONLY)
678 static inline int cpu_address_mode(CPURISCVState *env)
679 {
680     int mode = env->priv;
681 
682     if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
683         mode = get_field(env->mstatus, MSTATUS_MPP);
684     }
685     return mode;
686 }
687 
688 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
689 {
690     RISCVMXL xl = env->misa_mxl;
691     /*
692      * When emulating a 32-bit-only cpu, use RV32.
693      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
694      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
695      * back to RV64 for lower privs.
696      */
697     if (xl != MXL_RV32) {
698         switch (mode) {
699         case PRV_M:
700             break;
701         case PRV_U:
702             xl = get_field(env->mstatus, MSTATUS64_UXL);
703             break;
704         default: /* PRV_S */
705             xl = get_field(env->mstatus, MSTATUS64_SXL);
706             break;
707         }
708     }
709     return xl;
710 }
711 #endif
712 
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
/*
 * Recompute the effective XLEN for the current privilege mode.
 * User-only emulation has no privilege switching, so misa_mxl
 * applies directly.
 */
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif
725 
#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
/*
 * Effective XLEN for address calculation.  System emulation honours
 * the MPRV redirection via cpu_address_mode(); user-only emulation
 * uses the cached current xlen.
 */
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
740 
741 static inline int riscv_cpu_xlen(CPURISCVState *env)
742 {
743     return 16 << env->xl;
744 }
745 
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
/*
 * Supervisor XLEN: MSTATUS64_SXL when the machine XLEN is wider than
 * RV32, otherwise RV32.  User-only emulation reports misa_mxl directly.
 */
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif
761 
762 static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
763                                               target_long priv_ver,
764                                               uint32_t misa_ext)
765 {
766     /* In priv spec version 1.12 or newer, C always implies Zca */
767     if (priv_ver >= PRIV_VERSION_1_12_0) {
768         return cfg->ext_zca;
769     } else {
770         return misa_ext & RVC;
771     }
772 }
773 
774 /*
775  * Encode LMUL to lmul as follows:
776  *     LMUL    vlmul    lmul
777  *      1       000       0
778  *      2       001       1
779  *      4       010       2
780  *      8       011       3
781  *      -       100       -
782  *     1/8      101      -3
783  *     1/4      110      -2
784  *     1/2      111      -1
785  *
786  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
787  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
788  *      => VLMAX = vlen >> (1 + 3 - (-3))
789  *               = 256 >> 7
790  *               = 2
791  */
/*
 * Compute VLMAX = vlen >> (vsew + 3 - lmul) for the encoding described
 * above.  @vlenb is the vector register length in bytes, @vsew the
 * encoded element width, @lmul the signed encoded LMUL.
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    /*
     * Working in bits (vlenb << 3) rather than bytes preserves the
     * '+ 3' in the formula, keeping the shift amount non-negative
     * even for fractional LMUL where vsew < lmul.
     */
    int shift = (int)vsew + 3 - lmul;

    return (vlenb << 3) >> shift;
}
804 
805 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
806 
807 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
808 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
809 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
810 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
811 
812 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
813                           target_ulong *ret_value);
814 
815 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
816                            target_ulong *ret_value,
817                            target_ulong new_value, target_ulong write_mask);
818 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
819                                  target_ulong *ret_value,
820                                  target_ulong new_value,
821                                  target_ulong write_mask);
822 
/*
 * Write @val to CSR @csrno, discarding the old value.  The all-ones
 * write mask makes riscv_csrrw() perform a full-register write.
 */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
828 
829 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
830 {
831     target_ulong val = 0;
832     riscv_csrrw(env, csrno, &val, 0, 0);
833     return val;
834 }
835 
836 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
837                                                  int csrno);
838 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
839                                             target_ulong *ret_value);
840 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
841                                              target_ulong new_value);
842 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
843                                           target_ulong *ret_value,
844                                           target_ulong new_value,
845                                           target_ulong write_mask);
846 
847 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
848                                Int128 *ret_value);
849 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
850                                 Int128 *ret_value,
851                                 Int128 new_value, Int128 write_mask);
852 
853 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
854                                                Int128 *ret_value);
855 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
856                                              Int128 new_value);
857 
/* Per-CSR dispatch entry for the csr_ops[] function table */
typedef struct {
    const char *name;                 /* CSR name */
    riscv_csr_predicate_fn predicate; /* access-permission check */
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;               /* combined read-modify-write */
    riscv_csr_read128_fn read128;     /* 128-bit access variants */
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
869 
870 /* CSR function table constants */
871 enum {
872     CSR_TABLE_SIZE = 0x1000
873 };
874 
/*
 * The event ids are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
887 
888 /* used by tcg/tcg-cpu.c*/
889 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
890 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
891 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
892 bool riscv_cpu_is_vendor(Object *cpu_obj);
893 
/* Descriptor for one boolean multi-letter extension property */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;   /* property name */
    uint32_t offset;    /* flag offset in RISCVCPUConfig (see CPU_CFG_OFFSET) */
    bool enabled;       /* default state */
} RISCVCPUMultiExtConfig;
899 
900 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
901 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
902 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
903 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
904 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
905 
/* Static metadata for one ISA extension (entry of isa_edata_arr[]) */
typedef struct isa_ext_data {
    const char *name;           /* canonical extension name */
    int min_version;            /* minimum spec version, presumably PRIV_VERSION_* -- verify */
    int ext_enable_offset;      /* enable-flag offset inside RISCVCPUConfig */
} RISCVIsaExtData;
911 extern const RISCVIsaExtData isa_edata_arr[];
912 char *riscv_cpu_get_name(RISCVCPU *cpu);
913 
914 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
915 void riscv_add_satp_mode_properties(Object *obj);
916 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
917 
918 /* CSR function table */
919 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
920 
921 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
922 
923 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
924 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
925 
926 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
927 
928 target_ulong riscv_new_csr_seed(target_ulong new_value,
929                                 target_ulong write_mask);
930 
931 uint8_t satp_mode_max_from_map(uint32_t map);
932 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
933 
934 /* Implemented in th_csr.c */
935 void th_register_custom_csrs(RISCVCPU *cpu);
936 
937 const char *priv_spec_to_str(int priv_version);
938 #endif /* RISCV_CPU_H */
939