/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64
#endif
/*
 * b0: Whether an instruction always raises a store/AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

#define RV(x) ((target_ulong)1 << (x - 'A'))
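
/*
 * For illustration: RV() maps a single-letter extension name to its
 * MISA bit, e.g. RV('A') == 1 << 0, RV('I') == 1 << 8 and
 * RV('M') == 1 << 12.
 */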

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVG RV('G')
#define RVB RV('B')

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)

typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent;
    struct riscv_cpu_profile *s_parent;
    const char *name;
    uint32_t misa_ext;
    bool enabled;
    bool user_set;
    int priv_spec;
    int satp_mode;
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1
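
/*
 * Illustrative sketch (hypothetical profile, for exposition only; the
 * ext_zicsr config field is assumed): concrete profiles are statically
 * allocated and terminate their ext_offsets[] list with
 * RISCV_PROFILE_EXT_LIST_END, e.g.
 *
 *     static RISCVCPUProfile EXAMPLE_PROFILE = {
 *         .name = "example-profile",
 *         .misa_ext = RVI | RVM | RVA,
 *         .priv_spec = PRIV_VERSION_1_12_0,
 *         .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
 *         .ext_offsets = {
 *             CPU_CFG_OFFSET(ext_zicsr),
 *             RISCV_PROFILE_EXT_LIST_END
 *         },
 *     };
 */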

extern RISCVCPUProfile *riscv_profiles[];

/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,
    PMM_FIELD_RESERVED = 1,
    PMM_FIELD_PMLEN7 = 2,
    PMM_FIELD_PMLEN16 = 3,
} RISCVPmPmm;

typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicating the rule's enabled status for each hart.
     * This enhancement is only available in system-mode QEMU, as we
     * don't have a good way (e.g. mhartid) to distinguish the SMP
     * cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is a MISA bit if is_misa is true, otherwise a multi-extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;

extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1
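
/*
 * Illustrative sketch (hypothetical rule, for exposition only): each
 * rule terminates implied_multi_exts[] with RISCV_IMPLIED_EXTS_RULE_END,
 * so "D implies F" could be expressed as:
 *
 *     static RISCVCPUImpliedExtsRule EXAMPLE_RVD_IMPLIED = {
 *         .is_misa = true,
 *         .ext = RVD,
 *         .implied_misa_exts = RVF,
 *         .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
 *     };
 */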

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
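
/*
 * A usage sketch, assuming the FIELD_EX64() helper from
 * "hw/registerfields.h": the FIELD() definitions above generate
 * R_VTYPE_* shift/length constants, so the selected element width can
 * be extracted as
 *
 *     uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
 */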

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter's high half in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter's high half in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env placeholder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an alias of
     * mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an alias of
     * sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;  /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval; /* since: priv-1.10.0 */

    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * separately maintained in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)  (((__ireg) >> 24) & 0xff)
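
    /*
     * For illustration (hypothetical selector value): AIA_MAKE_IREG()
     * packs its arguments into one target_ulong and the AIA_IREG_*()
     * helpers unpack it again, so a round trip recovers each field, e.g.
     *
     *     target_ulong ireg = AIA_MAKE_IREG(0x70, PRV_S, true, 1, 64);
     *     AIA_IREG_ISEL(ireg) == 0x70 and AIA_IREG_PRIV(ireg) == PRV_S.
     */
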
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer;  /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};

/*
 * map is a 16-bit bitmap: the most significant set bit in map is the maximum
 * satp mode that is supported. It may be chosen by the user and must respect
 * what QEMU implements (valid_1_10_32/64) and what the hw is capable of
 * (supported bitmap below).
 *
 * init is a 16-bit bitmap used to make sure the user selected a correct
 * configuration as per the specification.
 */
typedef struct {
    uint16_t map, init;
} RISCVSATPModes;

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISC-V CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
    RISCVSATPModes satp_modes;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};

typedef struct RISCVCSR RISCVCSR;

typedef struct RISCVCPUDef {
    RISCVMXL misa_mxl_max; /* max mxl for this cpu */
    RISCVCPUProfile *profile;
    uint32_t misa_ext;
    int priv_spec;
    int32_t vext_spec;
    RISCVCPUConfig cfg;
    bool bare;
    const RISCVCSR *custom_csrs;
} RISCVCPUDef;

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISC-V CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    RISCVCPUDef *def;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_interrupt(CPUState *cpu);
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
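
/*
 * For illustration (a sketch of a typical call site): BOOL_TO_MASK()
 * turns a boolean into an all-ones or all-zeroes mask, so raising or
 * clearing a single mip bit from an IRQ line level could look like
 *
 *     riscv_cpu_update_mip(env, MIP_MSIP, BOOL_TO_MASK(level));
 */
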
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
                         enum CTRType type, target_ulong prev_priv,
                         bool prev_virt);
void riscv_ctr_clear(CPURISCVState *env);

void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc);

G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      RISCVException exception,
                                      uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
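
/*
 * For illustration: riscv_cpu_mxl_bits() converts the MXL encoding to a
 * width in bits, e.g. MXL_RV32 (1) gives 32, MXL_RV64 (2) gives 64 and
 * MXL_RV128 (3) gives 128.
 */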

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif

static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
                                              target_long priv_ver,
                                              uint32_t misa_ext)
{
    /* In priv spec version 1.12 or newer, C always implies Zca */
    if (priv_ver >= PRIV_VERSION_1_12_0) {
        return cfg->ext_zca;
    } else {
        return misa_ext & RVC;
    }
}

/*
 * Encode LMUL to lmul as follows:
 *
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    /*
     * We need to use 'vlen' instead of 'vlenb' to
     * preserve the '+ 3' in the formula. Otherwise
     * we risk a negative shift if vsew < lmul.
     */
    return vlen >> (vsew + 3 - lmul);
}
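
/*
 * A usage sketch matching the worked example above: with vlenb = 32
 * (VLEN = 256 bits), vsew = 1 (SEW = 16) and lmul = -3 (LMUL = 1/8),
 * vext_get_vlmax(32, 1, -3) returns 256 >> 7 == 2.
 */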

bool riscv_cpu_is_32bit(RISCVCPU *cpu);

bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);

RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0, 0);
    return val;
}
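
/*
 * For illustration (a hypothetical call site): these helpers wrap
 * riscv_csrrw() for whole-register accesses, e.g.
 *
 *     target_ulong scratch = riscv_csr_read(env, CSR_MSCRATCH);
 *     riscv_csr_write(env, CSR_MSCRATCH, scratch + 1);
 */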

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value,
                                             uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

struct RISCVCSR {
    int csrno;
    bool (*insertion_test)(RISCVCPU *cpu);
    riscv_csr_operations csr_ops;
};

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* In th_csr.c */
extern const RISCVCSR th_csr_list[];

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */