1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-common.h"
27 #include "exec/cpu-defs.h"
28 #include "exec/cpu-interrupt.h"
29 #include "exec/gdbstub.h"
30 #include "qemu/cpu-float.h"
31 #include "qom/object.h"
32 #include "qemu/int128.h"
33 #include "cpu_bits.h"
34 #include "cpu_cfg.h"
35 #include "qapi/qapi-types-common.h"
36 #include "cpu-qom.h"
37
/* The target CPU state; the struct itself is defined further below. */
typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

/* Select the "base" QOM CPU type matching the compiled target XLEN. */
#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

/*
 * b0: Whether an instruction always raises a store AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

/* Map a MISA extension letter ('A'..'Z') to its bit in the misa CSR. */
#define RV(x) ((target_ulong)1 << (x - 'A'))
54
/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVG RV('G')
#define RVB RV('B')

/* MISA bit table and per-bit name/description lookups. */
extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

/* Byte offsets into the config struct / CPU env, for table-driven code. */
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
79
/*
 * Description of a RISC-V profile: a named bundle of mandatory
 * extensions, optionally chained to parent U-mode/S-mode profiles.
 */
typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent;  /* parent unprivileged profile, if any */
    struct riscv_cpu_profile *s_parent;  /* parent supervisor profile, if any */
    const char *name;
    uint32_t misa_ext;                   /* MISA bits mandated by the profile */
    /*
     * The profile is enabled/disabled via command line or
     * via cpu_init(). Enabling a profile will add all its
     * mandatory extensions in the CPU during init().
     */
    bool enabled;
    /*
     * The profile is present in the CPU, i.e. the current set of
     * CPU extensions complies with it. A profile can be enabled
     * and not present (e.g. the user disabled a mandatory extension)
     * and the other way around (e.g. all mandatory extensions are
     * present in a non-profile CPU).
     *
     * QMP uses this flag.
     */
    bool present;
    bool user_set;       /* true when the user set the profile explicitly */
    int priv_spec;
    int satp_mode;
    /* multi-extension cfg offsets; presumably terminated with
     * RISCV_PROFILE_EXT_LIST_END — confirm against the table definitions */
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1

extern RISCVCPUProfile *riscv_profiles[];
111
/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

/* Vector extension version, encoded as major << 16 | minor << 8 | patch. */
#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

/* Result codes for address translation. */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,
    PMM_FIELD_RESERVED = 1,
    PMM_FIELD_PMLEN7 = 2,
    PMM_FIELD_PMLEN16 = 3,
} RISCVPmPmm;
151
/*
 * One "extension X implies extensions Y..." rule, used when finalizing
 * the CPU feature set.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    /* presumably terminated with RISCV_IMPLIED_EXTS_RULE_END — confirm */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;

extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1
174
/* MMU index used for user-mode accesses. */
#define MMU_USER_IDX 3

/* Number of PMP regions (and the previous, smaller limit). */
#define MAX_RISCV_PMPS (64)
#define OLD_MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

/* Vector register length limit (bits) and PMU table sizes. */
#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

/* Field layout of the vtype CSR. */
FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
195
/* Per-counter PMU state for one programmable hpmcounter. */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot values of counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

/* State for the fixed cycle/instret counters. */
typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
217
/*
 * Architectural state of a single RISC-V hart. Everything before the
 * "preserved across CPU reset" marker below is reinitialised on reset.
 */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool      elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env place holder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Smctr/Ssctr (control transfer records) CSRs */
    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    /* Control transfer record buffers, sized by the maximum sctrdepth. */
    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer;  /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};
517
/*
 * map is a 16-bit bitmap: the most significant set bit in map is the maximum
 * satp mode that is supported. It may be chosen by the user and must respect
 * what qemu implements (valid_1_10_32/64) and what the hw is capable of
 * (supported bitmap below).
 *
 * init is a 16-bit bitmap used to make sure the user selected a correct
 * configuration as per the specification.
 */
typedef struct {
    uint16_t map, init;
} RISCVSATPModes;
530
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;        /* QOM parent, must be first */

    CPURISCVState env;          /* architectural state of this hart */

    /* Dynamically built gdbstub feature descriptions (CSRs, vregs). */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
    RISCVSATPModes satp_modes;

    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};
556
typedef struct RISCVCSR RISCVCSR;

/* Static description of a CPU model, attached to its class. */
typedef struct RISCVCPUDef {
    RISCVMXL misa_mxl_max;  /* max mxl for this cpu */
    RISCVCPUProfile *profile;
    uint32_t misa_ext;
    int priv_spec;
    int32_t vext_spec;
    RISCVCPUConfig cfg;
    bool bare;                    /* "bare" CPU: no default extensions */
    const RISCVCSR *custom_csrs;  /* vendor-specific CSRs, if any */
} RISCVCPUDef;

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    RISCVCPUDef *def;
};
584
/* Return non-zero iff MISA extension @ext is currently enabled in @env. */
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return !!(env->misa_ext & ext);
}
589
#include "cpu_user.h"

/* Register name tables used for tracing and gdbstub. */
extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
/* Core-dump note writers for 64-bit and 32-bit guests. */
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
/* Pending-interrupt queries per target privilege level. */
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
/* Forward/backward control-flow integrity (Zicfilp/Zicfiss) queries. */
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_interrupt(CPUState *cpu);
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

/* Control transfer records (Smctr/Ssctr) helpers. */
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
                         enum CTRType type, target_ulong prev_priv, bool prev_virt);
void riscv_ctr_clear(CPURISCVState *env);

void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc);

G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      RISCVException exception,
                                      uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
674
/* Per-TB flag layout encoded into the translation-block hash. */
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/*
 * NOTE(review): PM_MASK_ENABLED/PM_BASE_ENABLED occupy bits 18/19, which
 * are also claimed by VTA/VMA just below. Both pairs cannot be live at
 * once; the PM_* pair looks like a stale leftover of an older pointer
 * masking scheme (the current one uses PM_PMM/PM_SIGNEXTEND at bits
 * 29-31). Verify all users before removing.
 */
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
705
/* Machine XLEN (misa.MXL); a compile-time constant on 32-bit-only targets. */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
/* MXL expressed in bits: 1 << (4 + mxl) yields 32/64/128. */
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
715
riscv_cpu_cfg(CPURISCVState * env)716 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
717 {
718 return &env_archcpu(env)->cfg;
719 }
720
721 #if !defined(CONFIG_USER_ONLY)
cpu_address_mode(CPURISCVState * env)722 static inline int cpu_address_mode(CPURISCVState *env)
723 {
724 int mode = env->priv;
725
726 if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
727 mode = get_field(env->mstatus, MSTATUS_MPP);
728 }
729 return mode;
730 }
731
cpu_get_xl(CPURISCVState * env,target_ulong mode)732 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
733 {
734 RISCVMXL xl = env->misa_mxl;
735 /*
736 * When emulating a 32-bit-only cpu, use RV32.
737 * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
738 * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
739 * back to RV64 for lower privs.
740 */
741 if (xl != MXL_RV32) {
742 switch (mode) {
743 case PRV_M:
744 break;
745 case PRV_U:
746 xl = get_field(env->mstatus, MSTATUS64_UXL);
747 break;
748 default: /* PRV_S */
749 xl = get_field(env->mstatus, MSTATUS64_SXL);
750 break;
751 }
752 }
753 return xl;
754 }
755 #endif
756
/* Recompute the effective XLEN for the current privilege mode. */
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    /* User-mode emulation has no lower privilege levels to consult. */
    return env->misa_mxl;
#endif
}
#endif
769
/* XLEN that applies to data addresses (follows MPRV redirection). */
#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
784
/* Current effective XLEN in bits: 16 << xl (RV32 -> 32, RV64 -> 64). */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    int xlen_bits = 16 << env->xl;

    return xlen_bits;
}
789
/* S-mode XLEN (mstatus.SXL); fixed at RV32 when the machine MXL is RV32. */
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif
805
riscv_cpu_allow_16bit_insn(const RISCVCPUConfig * cfg,target_long priv_ver,uint32_t misa_ext)806 static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
807 target_long priv_ver,
808 uint32_t misa_ext)
809 {
810 /* In priv spec version 1.12 or newer, C always implies Zca */
811 if (priv_ver >= PRIV_VERSION_1_12_0) {
812 return cfg->ext_zca;
813 } else {
814 return misa_ext & RVC;
815 }
816 }
817
/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    /*
     * Shift the VLEN expressed in bits rather than bytes: keeping the
     * '+ 3' inside the shift amount means it can never go negative,
     * even when vsew < lmul.
     */
    uint32_t vlen_bits = vlenb * 8;
    int shift = (int)vsew + 3 - lmul;

    return vlen_bits >> shift;
}
848
bool riscv_cpu_is_32bit(RISCVCPU *cpu);

/* Pointer-masking (Zjpm) state queries. */
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);

/* Read-only CSR access. */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value);

/* Full read-modify-write CSR access; @ra is the unwind return address. */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra);
/* As riscv_csrrw(), but bypassing predicates for debugger access. */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);
866
/* Write @val to CSR @csrno, discarding the old value and any error code. */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    target_ulong wmask = MAKE_64BIT_MASK(0, TARGET_LONG_BITS);

    riscv_csrrw(env, csrno, NULL, val, wmask, 0);
}
872
riscv_csr_read(CPURISCVState * env,int csrno)873 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
874 {
875 target_ulong val = 0;
876 riscv_csrrw(env, csrno, &val, 0, 0, 0);
877 return val;
878 }
879
/* Callback signatures for the table-driven CSR implementation. */
typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value,
                                             uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

/* 128-bit CSR access entry points (RV128). */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

/* One row of the CSR dispatch table. */
typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;  /* access check, run first */
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;                /* combined read-modify-write hook */
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* A custom (e.g. vendor) CSR plus the test deciding whether to install it. */
struct RISCVCSR {
    int csrno;
    bool (*insertion_test)(RISCVCPU *cpu);
    riscv_csr_operations csr_ops;
};

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};
925
/*
 * The event id are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

/* Descriptor of one boolean extension property exposed to the user. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;   /* offset of the flag within RISCVCPUConfig */
    bool enabled;      /* default value */
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];

/* ISA-string metadata for one extension (name + minimum priv version). */
typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);
962 char *riscv_cpu_get_name(RISCVCPU *cpu);
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

/* Valid satp modes per priv spec 1.10 for RV32/RV64. */
extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

/* Zkr entropy source (seed CSR) helper. */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* In th_csr.c */
extern const RISCVCSR th_csr_list[];

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
988