xref: /qemu/target/riscv/cpu.h (revision 5fd23f20e12a56e7ac2dabbe9570fb2f10d7c5b4)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-common.h"
27 #include "exec/cpu-defs.h"
28 #include "exec/cpu-interrupt.h"
29 #include "exec/gdbstub.h"
30 #include "qemu/cpu-float.h"
31 #include "qom/object.h"
32 #include "qemu/int128.h"
33 #include "cpu_bits.h"
34 #include "cpu_cfg.h"
35 #include "qapi/qapi-types-common.h"
36 #include "cpu-qom.h"
37 
38 typedef struct CPUArchState CPURISCVState;
39 
40 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
41 
42 #if defined(TARGET_RISCV32)
43 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
44 #elif defined(TARGET_RISCV64)
45 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
46 #endif
47 
48 /*
49  * b0: Whether an instruction always raises a store/AMO fault or not.
50  */
51 #define RISCV_UW2_ALWAYS_STORE_AMO 1
52 
53 #define RV(x) ((target_ulong)1 << (x - 'A'))
54 
55 /*
56  * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
57  * when adding new MISA bits here.
58  */
59 #define RVI RV('I')
60 #define RVE RV('E') /* E and I are mutually exclusive */
61 #define RVM RV('M')
62 #define RVA RV('A')
63 #define RVF RV('F')
64 #define RVD RV('D')
65 #define RVV RV('V')
66 #define RVC RV('C')
67 #define RVS RV('S')
68 #define RVU RV('U')
69 #define RVH RV('H')
70 #define RVG RV('G')
71 #define RVB RV('B')
72 
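/*
 * For example, RVI expands to ((target_ulong)1 << 8) and RVC to
 * ((target_ulong)1 << 2), so testing a MISA extension is a plain mask check
 * such as (env->misa_ext & RVC), or the riscv_has_ext() helper further down.
 */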
73 extern const uint32_t misa_bits[];
74 const char *riscv_get_misa_ext_name(uint32_t bit);
75 const char *riscv_get_misa_ext_description(uint32_t bit);
76 
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
78 #define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
79 
80 typedef struct riscv_cpu_profile {
81     struct riscv_cpu_profile *u_parent;
82     struct riscv_cpu_profile *s_parent;
83     const char *name;
84     uint32_t misa_ext;
85     bool enabled;
86     bool user_set;
87     int priv_spec;
88     int satp_mode;
89     const int32_t ext_offsets[];
90 } RISCVCPUProfile;
91 
92 #define RISCV_PROFILE_EXT_LIST_END -1
93 #define RISCV_PROFILE_ATTR_UNUSED -1
94 
95 extern RISCVCPUProfile *riscv_profiles[];
96 
97 /* Privileged specification version */
98 #define PRIV_VER_1_10_0_STR "v1.10.0"
99 #define PRIV_VER_1_11_0_STR "v1.11.0"
100 #define PRIV_VER_1_12_0_STR "v1.12.0"
101 #define PRIV_VER_1_13_0_STR "v1.13.0"
102 enum {
103     PRIV_VERSION_1_10_0 = 0,
104     PRIV_VERSION_1_11_0,
105     PRIV_VERSION_1_12_0,
106     PRIV_VERSION_1_13_0,
107 
108     PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
109 };
110 
111 #define VEXT_VERSION_1_00_0 0x00010000
112 #define VEXT_VER_1_00_0_STR "v1.0"
113 
114 enum {
115     TRANSLATE_SUCCESS,
116     TRANSLATE_FAIL,
117     TRANSLATE_PMP_FAIL,
118     TRANSLATE_G_STAGE_FAIL
119 };
120 
121 /* Extension context status */
122 typedef enum {
123     EXT_STATUS_DISABLED = 0,
124     EXT_STATUS_INITIAL,
125     EXT_STATUS_CLEAN,
126     EXT_STATUS_DIRTY,
127 } RISCVExtStatus;
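/*
 * Note: these values line up with the 2-bit Off/Initial/Clean/Dirty status
 * encoding used by the mstatus.FS/VS fields (e.g. EXT_STATUS_DIRTY == 3 ==
 * Dirty), so they can be written to those fields directly.
 */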
128 
129 /* Enum holds PMM field values for Zjpm v1.0 extension */
130 typedef enum {
131     PMM_FIELD_DISABLED = 0,
132     PMM_FIELD_RESERVED = 1,
133     PMM_FIELD_PMLEN7   = 2,
134     PMM_FIELD_PMLEN16  = 3,
135 } RISCVPmPmm;
136 
137 typedef struct riscv_cpu_implied_exts_rule {
138 #ifndef CONFIG_USER_ONLY
139     /*
140      * Bitmask indicating the enabled status of the rule for each hart.
141      * This enhancement is only available in system-mode QEMU,
142      * as we don't have a good way (e.g. mhartid) to distinguish
143      * the SMP cores in user-mode QEMU.
144      */
145     unsigned long *enabled;
146 #endif
147     /* True if this is a MISA implied rule. */
148     bool is_misa;
149     /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
150     const uint32_t ext;
151     const uint32_t implied_misa_exts;
152     const uint32_t implied_multi_exts[];
153 } RISCVCPUImpliedExtsRule;
154 
155 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
156 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
157 
158 #define RISCV_IMPLIED_EXTS_RULE_END -1
159 
160 #define MMU_USER_IDX 3
161 
162 #define MAX_RISCV_PMPS (16)
163 
164 #if !defined(CONFIG_USER_ONLY)
165 #include "pmp.h"
166 #include "debug.h"
167 #endif
168 
169 #define RV_VLEN_MAX 1024
170 #define RV_MAX_MHPMEVENTS 32
171 #define RV_MAX_MHPMCOUNTERS 32
172 
173 FIELD(VTYPE, VLMUL, 0, 3)
174 FIELD(VTYPE, VSEW, 3, 3)
175 FIELD(VTYPE, VTA, 6, 1)
176 FIELD(VTYPE, VMA, 7, 1)
177 FIELD(VTYPE, VEDIV, 8, 2)
178 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
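/*
 * Example: for vtype == 0x0d, FIELD_EX64(vtype, VTYPE, VLMUL) == 5
 * (LMUL = 1/8) and FIELD_EX64(vtype, VTYPE, VSEW) == 1 (SEW = 16), with
 * VTA/VMA/VEDIV all zero; see the LMUL encoding table near vext_get_vlmax().
 */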
179 
180 typedef struct PMUCTRState {
181     /* Current value of a counter */
182     target_ulong mhpmcounter_val;
183     /* Current value of a counter in RV32 */
184     target_ulong mhpmcounterh_val;
185     /* Snapshot value of a counter */
186     target_ulong mhpmcounter_prev;
187     /* Snapshot value of a counter in RV32 */
188     target_ulong mhpmcounterh_prev;
189     /* Value beyond UINT32_MAX/UINT64_MAX before the overflow interrupt triggers */
190     target_ulong irq_overflow_left;
191 } PMUCTRState;
192 
193 typedef struct PMUFixedCtrState {
194         /* Track cycle and icount for each privilege mode */
195         uint64_t counter[4];
196         uint64_t counter_prev[4];
197         /* Track cycle and icount for each privilege mode when V = 1 */
198         uint64_t counter_virt[2];
199         uint64_t counter_virt_prev[2];
200 } PMUFixedCtrState;
201 
202 struct CPUArchState {
203     target_ulong gpr[32];
204     target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
205 
206     /* vector coprocessor state. */
207     uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
208     target_ulong vxrm;
209     target_ulong vxsat;
210     target_ulong vl;
211     target_ulong vstart;
212     target_ulong vtype;
213     bool vill;
214 
215     target_ulong pc;
216     target_ulong load_res;
217     target_ulong load_val;
218 
219     /* Floating-Point state */
220     uint64_t fpr[32]; /* assume both F and D extensions */
221     target_ulong frm;
222     float_status fp_status;
223 
224     target_ulong badaddr;
225     target_ulong bins;
226 
227     target_ulong guest_phys_fault_addr;
228 
229     target_ulong priv_ver;
230     target_ulong vext_ver;
231 
232     /* RISCVMXL, but uint32_t for vmstate migration */
233     uint32_t misa_mxl;      /* current mxl */
234     uint32_t misa_ext;      /* current extensions */
235     uint32_t misa_ext_mask; /* max ext for this cpu */
236     uint32_t xl;            /* current xlen */
237 
238     /* Upper part of the return value of the 128-bit helpers */
239     target_ulong retxh;
240 
241     target_ulong jvt;
242 
243     /* elp state for zicfilp extension */
244     bool      elp;
245     /* shadow stack register for zicfiss extension */
246     target_ulong ssp;
247     /* env placeholder for extra word 2 during unwind */
248     target_ulong excp_uw2;
249     /* sw check code for sw check exception */
250     target_ulong sw_check_code;
251 #ifdef CONFIG_USER_ONLY
252     uint32_t elf_flags;
253 #endif
254 
255     target_ulong priv;
256     /* CSRs for execution environment configuration */
257     uint64_t menvcfg;
258     target_ulong senvcfg;
259 
260 #ifndef CONFIG_USER_ONLY
261     /* This contains QEMU-specific information about the virt state. */
262     bool virt_enabled;
263     target_ulong geilen;
264     uint64_t resetvec;
265 
266     target_ulong mhartid;
267     /*
268      * For RV32 this is 32-bit mstatus and 32-bit mstatush.
269      * For RV64 this is a 64-bit mstatus.
270      */
271     uint64_t mstatus;
272 
273     uint64_t mip;
274     /*
275      * MIP contains the software writable version of SEIP ORed with the
276      * external interrupt value. The MIP register is always up-to-date.
277      * To keep track of the current source, we also save booleans of the values
278      * here.
279      */
280     bool external_seip;
281     bool software_seip;
282 
283     uint64_t miclaim;
284 
285     uint64_t mie;
286     uint64_t mideleg;
287 
288     /*
289      * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
290      * alias of mie[i] and needs to be maintained separately.
291      */
292     uint64_t sie;
293 
294     /*
295      * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
296      * alias of sie[i] (mie[i]) and needs to be maintained separately.
297      */
298     uint64_t vsie;
299 
300     target_ulong satp;   /* since: priv-1.10.0 */
301     target_ulong stval;
302     target_ulong medeleg;
303 
304     target_ulong stvec;
305     target_ulong sepc;
306     target_ulong scause;
307 
308     target_ulong mtvec;
309     target_ulong mepc;
310     target_ulong mcause;
311     target_ulong mtval;  /* since: priv-1.10.0 */
312 
313     uint64_t mctrctl;
314     uint32_t sctrdepth;
315     uint32_t sctrstatus;
316     uint64_t vsctrctl;
317 
318     uint64_t ctr_src[16 << SCTRDEPTH_MAX];
319     uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
320     uint64_t ctr_data[16 << SCTRDEPTH_MAX];
321 
322     /* Machine and Supervisor interrupt priorities */
323     uint8_t miprio[64];
324     uint8_t siprio[64];
325 
326     /* AIA CSRs */
327     target_ulong miselect;
328     target_ulong siselect;
329     uint64_t mvien;
330     uint64_t mvip;
331 
332     /* Hypervisor CSRs */
333     target_ulong hstatus;
334     target_ulong hedeleg;
335     uint64_t hideleg;
336     uint32_t hcounteren;
337     target_ulong htval;
338     target_ulong htinst;
339     target_ulong hgatp;
340     target_ulong hgeie;
341     target_ulong hgeip;
342     uint64_t htimedelta;
343     uint64_t hvien;
344 
345     /*
346      * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. The other
347      * bits in the range 0:12 are reserved. Bits 13:63 are not aliased and
348      * must be maintained separately in hvip.
349      */
350     uint64_t hvip;
351 
352     /* Hypervisor controlled virtual interrupt priorities */
353     target_ulong hvictl;
354     uint8_t hviprio[64];
355 
356     /* Upper 64-bits of 128-bit CSRs */
357     uint64_t mscratchh;
358     uint64_t sscratchh;
359 
360     /* Virtual CSRs */
361     /*
362      * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
363      * For RV64 this is a 64-bit vsstatus.
364      */
365     uint64_t vsstatus;
366     target_ulong vstvec;
367     target_ulong vsscratch;
368     target_ulong vsepc;
369     target_ulong vscause;
370     target_ulong vstval;
371     target_ulong vsatp;
372 
373     /* AIA VS-mode CSRs */
374     target_ulong vsiselect;
375 
376     target_ulong mtval2;
377     target_ulong mtinst;
378 
379     /* HS Backup CSRs */
380     target_ulong stvec_hs;
381     target_ulong sscratch_hs;
382     target_ulong sepc_hs;
383     target_ulong scause_hs;
384     target_ulong stval_hs;
385     target_ulong satp_hs;
386     uint64_t mstatus_hs;
387 
388     /*
389      * Signals whether the current exception occurred with two-stage address
390      * translation active.
391      */
392     bool two_stage_lookup;
393     /*
394      * Signals whether the current exception occurred while doing two-stage
395      * address translation for the VS-stage page table walk.
396      */
397     bool two_stage_indirect_lookup;
398 
399     uint32_t scounteren;
400     uint32_t mcounteren;
401 
402     uint32_t scountinhibit;
403     uint32_t mcountinhibit;
404 
405     /* PMU cycle & instret privilege mode filtering */
406     target_ulong mcyclecfg;
407     target_ulong mcyclecfgh;
408     target_ulong minstretcfg;
409     target_ulong minstretcfgh;
410 
411     /* PMU counter state */
412     PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
413 
414     /* PMU event selector configured values. First three are unused */
415     target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];
416 
417     /* PMU event selector configured values for RV32 */
418     target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
419 
420     PMUFixedCtrState pmu_fixed_ctrs[2];
421 
422     target_ulong sscratch;
423     target_ulong mscratch;
424 
425     /* Sstc CSRs */
426     uint64_t stimecmp;
427 
428     uint64_t vstimecmp;
429 
430     /* physical memory protection */
431     pmp_table_t pmp_state;
432     target_ulong mseccfg;
433 
434     /* trigger module */
435     target_ulong trigger_cur;
436     target_ulong tdata1[RV_MAX_TRIGGERS];
437     target_ulong tdata2[RV_MAX_TRIGGERS];
438     target_ulong tdata3[RV_MAX_TRIGGERS];
439     target_ulong mcontext;
440     struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
441     struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
442     QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
443     int64_t last_icount;
444     bool itrigger_enabled;
445 
446     /* machine specific rdtime callback */
447     uint64_t (*rdtime_fn)(void *);
448     void *rdtime_fn_arg;
449 
450     /* machine specific AIA ireg read-modify-write callback */
451 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
452     ((((__xlen) & 0xff) << 24) | \
453      (((__vgein) & 0x3f) << 20) | \
454      (((__virt) & 0x1) << 18) | \
455      (((__priv) & 0x3) << 16) | \
456      (__isel & 0xffff))
457 #define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
458 #define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
459 #define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
460 #define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
461 #define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
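    /*
     * AIA_MAKE_IREG() packs an indirect-register request (isel, priv, virt,
     * vgein, xlen) into a single target_ulong and the AIA_IREG_* accessors
     * above unpack it again, e.g.
     * AIA_IREG_PRIV(AIA_MAKE_IREG(isel, PRV_S, 0, 0, 64)) == PRV_S.
     */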
462     int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
463         target_ulong *val, target_ulong new_val, target_ulong write_mask);
464     void *aia_ireg_rmw_fn_arg[4];
465 
466     /* True if in debugger mode.  */
467     bool debugger;
468 
469     uint64_t mstateen[SMSTATEEN_MAX_COUNT];
470     uint64_t hstateen[SMSTATEEN_MAX_COUNT];
471     uint64_t sstateen[SMSTATEEN_MAX_COUNT];
472     uint64_t henvcfg;
473 #endif
474 
475     /* Fields from here on are preserved across CPU reset. */
476     QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
477     QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
478     bool vstime_irq;
479 
480     hwaddr kernel_addr;
481     hwaddr fdt_addr;
482 
483 #ifdef CONFIG_KVM
484     /* kvm timer */
485     bool kvm_timer_dirty;
486     uint64_t kvm_timer_time;
487     uint64_t kvm_timer_compare;
488     uint64_t kvm_timer_state;
489     uint64_t kvm_timer_frequency;
490 #endif /* CONFIG_KVM */
491 
492     /* RNMI */
493     target_ulong mnscratch;
494     target_ulong mnepc;
495     target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
496     target_ulong mnstatus;
497     target_ulong rnmip;
498     uint64_t rnmi_irqvec;
499     uint64_t rnmi_excpvec;
500 };
501 
502 /*
503  * map is a 16-bit bitmap: the most significant set bit in map is the maximum
504  * satp mode that is supported. It may be chosen by the user and must respect
505  * what QEMU implements (valid_1_10_32/64) and what the hardware is capable of
506  * (supported bitmap below).
507  *
508  * init is a 16-bit bitmap used to make sure the user selected a correct
509  * configuration as per the specification.
510  */
511 typedef struct {
512     uint16_t map, init;
513 } RISCVSATPModes;
514 
515 /*
516  * RISCVCPU:
517  * @env: #CPURISCVState
518  *
519  * A RISC-V CPU.
520  */
521 struct ArchCPU {
522     CPUState parent_obj;
523 
524     CPURISCVState env;
525 
526     GDBFeature dyn_csr_feature;
527     GDBFeature dyn_vreg_feature;
528 
529     /* Configuration Settings */
530     RISCVCPUConfig cfg;
531     RISCVSATPModes satp_modes;
532 
533     QEMUTimer *pmu_timer;
534     /* A bitmask of available programmable counters */
535     uint32_t pmu_avail_ctrs;
536     /* Mapping of events to counters */
537     GHashTable *pmu_event_ctr_map;
538     const GPtrArray *decoders;
539 };
540 
541 typedef struct RISCVCPUDef {
542     RISCVMXL misa_mxl_max;  /* max mxl for this cpu */
543 } RISCVCPUDef;
544 
545 /**
546  * RISCVCPUClass:
547  * @parent_realize: The parent class' realize handler.
548  * @parent_phases: The parent class' reset phase handlers.
549  *
550  * A RISC-V CPU model.
551  */
552 struct RISCVCPUClass {
553     CPUClass parent_class;
554 
555     DeviceRealize parent_realize;
556     ResettablePhases parent_phases;
557     RISCVCPUDef *def;
558 };
559 
560 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
561 {
562     return (env->misa_ext & ext) != 0;
563 }
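/* e.g. riscv_has_ext(env, RVH) is non-zero when the hypervisor (H) bit is
 * currently set in misa_ext. */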
564 
565 #include "cpu_user.h"
566 
567 extern const char * const riscv_int_regnames[];
568 extern const char * const riscv_int_regnamesh[];
569 extern const char * const riscv_fpr_regnames[];
570 
571 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
572 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
573                                int cpuid, DumpState *s);
574 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
575                                int cpuid, DumpState *s);
576 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
577 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
578 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
579 uint8_t riscv_cpu_default_priority(int irq);
580 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
581 int riscv_cpu_mirq_pending(CPURISCVState *env);
582 int riscv_cpu_sirq_pending(CPURISCVState *env);
583 int riscv_cpu_vsirq_pending(CPURISCVState *env);
584 bool riscv_cpu_fp_enabled(CPURISCVState *env);
585 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
586 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
587 bool riscv_cpu_vector_enabled(CPURISCVState *env);
588 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
589 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
590 bool cpu_get_fcfien(CPURISCVState *env);
591 bool cpu_get_bcfien(CPURISCVState *env);
592 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
593 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
594                                                MMUAccessType access_type,
595                                                int mmu_idx, uintptr_t retaddr);
596 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
597                         MMUAccessType access_type, int mmu_idx,
598                         bool probe, uintptr_t retaddr);
599 char *riscv_isa_string(RISCVCPU *cpu);
600 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
601 bool riscv_cpu_option_set(const char *optname);
602 
603 #ifndef CONFIG_USER_ONLY
604 void riscv_cpu_do_interrupt(CPUState *cpu);
605 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
606 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
607                                      vaddr addr, unsigned size,
608                                      MMUAccessType access_type,
609                                      int mmu_idx, MemTxAttrs attrs,
610                                      MemTxResult response, uintptr_t retaddr);
611 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
612 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
613 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
614 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
615 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
616                               uint64_t value);
617 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
618 void riscv_cpu_interrupt(CPURISCVState *env);
619 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
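/*
 * BOOL_TO_MASK(x) is 0 when x is false and an all-ones mask otherwise, so a
 * level-triggered interrupt line can be forwarded as, for instance:
 *     riscv_cpu_update_mip(env, MIP_MTIP, BOOL_TO_MASK(level));
 */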
620 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
621                              void *arg);
622 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
623                                    int (*rmw_fn)(void *arg,
624                                                  target_ulong reg,
625                                                  target_ulong *val,
626                                                  target_ulong new_val,
627                                                  target_ulong write_mask),
628                                    void *rmw_fn_arg);
629 
630 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
631 #endif /* !CONFIG_USER_ONLY */
632 
633 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
634 
635 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
636     enum CTRType type, target_ulong prev_priv, bool prev_virt);
637 void riscv_ctr_clear(CPURISCVState *env);
638 
639 void riscv_translate_init(void);
640 void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
641                           int *max_insns, vaddr pc, void *host_pc);
642 
643 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
644                                       RISCVException exception,
645                                       uintptr_t pc);
646 
647 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
648 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
649 
650 FIELD(TB_FLAGS, MEM_IDX, 0, 3)
651 FIELD(TB_FLAGS, FS, 3, 2)
652 /* Vector flags */
653 FIELD(TB_FLAGS, VS, 5, 2)
654 FIELD(TB_FLAGS, LMUL, 7, 3)
655 FIELD(TB_FLAGS, SEW, 10, 3)
656 FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
657 FIELD(TB_FLAGS, VILL, 14, 1)
658 FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
659 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
660 FIELD(TB_FLAGS, XL, 16, 2)
661 /* If PointerMasking should be applied */
662 FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
663 FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
664 FIELD(TB_FLAGS, VTA, 18, 1)
665 FIELD(TB_FLAGS, VMA, 19, 1)
666 /* Native debug itrigger */
667 FIELD(TB_FLAGS, ITRIGGER, 20, 1)
668 /* Virtual mode enabled */
669 FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
670 FIELD(TB_FLAGS, PRIV, 22, 2)
671 FIELD(TB_FLAGS, AXL, 24, 2)
672 /* zicfilp needs a TB flag to track indirect branches */
673 FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
674 FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
675 /* zicfiss needs a TB flag so that correct TB is located based on tb flags */
676 FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
677 /* If pointer masking should be applied and address sign extended */
678 FIELD(TB_FLAGS, PM_PMM, 29, 2)
679 FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
680 
681 #ifdef TARGET_RISCV32
682 #define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
683 #else
684 static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
685 {
686     return env->misa_mxl;
687 }
688 #endif
689 #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
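/* e.g. MXL_RV32 (1) yields 1UL << 5 == 32 and MXL_RV64 (2) yields 64. */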
690 
691 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
692 {
693     return &env_archcpu(env)->cfg;
694 }
695 
696 #if !defined(CONFIG_USER_ONLY)
697 static inline int cpu_address_mode(CPURISCVState *env)
698 {
699     int mode = env->priv;
700 
701     if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
702         mode = get_field(env->mstatus, MSTATUS_MPP);
703     }
704     return mode;
705 }
706 
707 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
708 {
709     RISCVMXL xl = env->misa_mxl;
710     /*
711      * When emulating a 32-bit-only cpu, use RV32.
712      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
713      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
714      * back to RV64 for lower privs.
715      */
716     if (xl != MXL_RV32) {
717         switch (mode) {
718         case PRV_M:
719             break;
720         case PRV_U:
721             xl = get_field(env->mstatus, MSTATUS64_UXL);
722             break;
723         default: /* PRV_S */
724             xl = get_field(env->mstatus, MSTATUS64_SXL);
725             break;
726         }
727     }
728     return xl;
729 }
730 #endif
731 
732 #if defined(TARGET_RISCV32)
733 #define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
734 #else
735 static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
736 {
737 #if !defined(CONFIG_USER_ONLY)
738     return cpu_get_xl(env, env->priv);
739 #else
740     return env->misa_mxl;
741 #endif
742 }
743 #endif
744 
745 #if defined(TARGET_RISCV32)
746 #define cpu_address_xl(env)  ((void)(env), MXL_RV32)
747 #else
748 static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
749 {
750 #ifdef CONFIG_USER_ONLY
751     return env->xl;
752 #else
753     int mode = cpu_address_mode(env);
754 
755     return cpu_get_xl(env, mode);
756 #endif
757 }
758 #endif
759 
760 static inline int riscv_cpu_xlen(CPURISCVState *env)
761 {
762     return 16 << env->xl;
763 }
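/* e.g. env->xl == MXL_RV64 (2) gives 16 << 2 == 64. */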
764 
765 #ifdef TARGET_RISCV32
766 #define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
767 #else
768 static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
769 {
770 #ifdef CONFIG_USER_ONLY
771     return env->misa_mxl;
772 #else
773     if (env->misa_mxl != MXL_RV32) {
774         return get_field(env->mstatus, MSTATUS64_SXL);
775     }
776 #endif
777     return MXL_RV32;
778 }
779 #endif
780 
781 static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
782                                               target_long priv_ver,
783                                               uint32_t misa_ext)
784 {
785     /* In priv spec version 1.12 or newer, C always implies Zca */
786     if (priv_ver >= PRIV_VERSION_1_12_0) {
787         return cfg->ext_zca;
788     } else {
789         return misa_ext & RVC;
790     }
791 }
792 
793 /*
794  * Encode LMUL to lmul as follows:
795  *     LMUL    vlmul    lmul
796  *      1       000       0
797  *      2       001       1
798  *      4       010       2
799  *      8       011       3
800  *      -       100       -
801  *     1/8      101      -3
802  *     1/4      110      -2
803  *     1/2      111      -1
804  *
805  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
806  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
807  *      => VLMAX = vlen >> (1 + 3 - (-3))
808  *               = 256 >> 7
809  *               = 2
810  */
811 static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
812                                       int8_t lmul)
813 {
814     uint32_t vlen = vlenb << 3;
815 
816     /*
817      * We need to use 'vlen' instead of 'vlenb' to
818      * preserve the '+ 3' in the formula. Otherwise
819      * we risk a negative shift if vsew < lmul.
820      */
821     return vlen >> (vsew + 3 - lmul);
822 }
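/*
 * e.g. vext_get_vlmax(32, 1, -3) == 2: vlenb 32 (VLEN = 256), vsew 1
 * (SEW = 16) and lmul -3 (LMUL = 1/8) give 256 >> (1 + 3 + 3) == 2, matching
 * the worked example above.
 */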
823 
824 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
825 
826 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
827 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
828 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
829 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
830 
831 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
832                           target_ulong *ret_value);
833 
834 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
835                            target_ulong *ret_value, target_ulong new_value,
836                            target_ulong write_mask, uintptr_t ra);
837 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
838                                  target_ulong *ret_value,
839                                  target_ulong new_value,
840                                  target_ulong write_mask);
841 
842 static inline void riscv_csr_write(CPURISCVState *env, int csrno,
843                                    target_ulong val)
844 {
845     riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
846 }
847 
848 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
849 {
850     target_ulong val = 0;
851     riscv_csrrw(env, csrno, &val, 0, 0, 0);
852     return val;
853 }
854 
855 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
856                                                  int csrno);
857 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
858                                             target_ulong *ret_value);
859 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
860                                              target_ulong new_value,
861                                              uintptr_t ra);
862 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
863                                           target_ulong *ret_value,
864                                           target_ulong new_value,
865                                           target_ulong write_mask);
866 
867 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
868                                Int128 *ret_value);
869 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
870                                 Int128 *ret_value, Int128 new_value,
871                                 Int128 write_mask, uintptr_t ra);
872 
873 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
874                                                Int128 *ret_value);
875 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
876                                              Int128 new_value);
877 
878 typedef struct {
879     const char *name;
880     riscv_csr_predicate_fn predicate;
881     riscv_csr_read_fn read;
882     riscv_csr_write_fn write;
883     riscv_csr_op_fn op;
884     riscv_csr_read128_fn read128;
885     riscv_csr_write128_fn write128;
886     /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
887     uint32_t min_priv_ver;
888 } riscv_csr_operations;
889 
890 /* CSR function table constants */
891 enum {
892     CSR_TABLE_SIZE = 0x1000
893 };
894 
895 /*
896  * The event IDs are encoded based on the encoding specified in the
897  * SBI specification v0.3.
898  */
899 
900 enum riscv_pmu_event_idx {
901     RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
902     RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
903     RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
904     RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
905     RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
906 };
907 
908 /* used by tcg/tcg-cpu.c */
909 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
910 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
911 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
912 bool riscv_cpu_is_vendor(Object *cpu_obj);
913 
914 typedef struct RISCVCPUMultiExtConfig {
915     const char *name;
916     uint32_t offset;
917     bool enabled;
918 } RISCVCPUMultiExtConfig;
919 
920 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
921 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
922 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
923 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
924 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
925 
926 typedef struct isa_ext_data {
927     const char *name;
928     int min_version;
929     int ext_enable_offset;
930 } RISCVIsaExtData;
931 extern const RISCVIsaExtData isa_edata_arr[];
932 char *riscv_cpu_get_name(RISCVCPU *cpu);
933 
934 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
935 void riscv_add_satp_mode_properties(Object *obj);
936 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
937 
938 /* CSR function table */
939 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
940 
941 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
942 
943 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
944 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
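/*
 * Sketch of overriding a CSR handler through these hooks (my_write is a
 * hypothetical replacement handler, not part of QEMU):
 *
 *     riscv_csr_operations ops;
 *     riscv_get_csr_ops(CSR_MSCRATCH, &ops);
 *     ops.write = my_write;
 *     riscv_set_csr_ops(CSR_MSCRATCH, &ops);
 */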
945 
946 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
947 
948 target_ulong riscv_new_csr_seed(target_ulong new_value,
949                                 target_ulong write_mask);
950 
951 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
952 
953 /* Implemented in th_csr.c */
954 void th_register_custom_csrs(RISCVCPU *cpu);
955 
956 const char *priv_spec_to_str(int priv_version);
957 #endif /* RISCV_CPU_H */
958