/*
 * QEMU RISC-V CPU -- internal functions and types
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H

#include "exec/cpu-common.h"
#include "hw/registerfields.h"
#include "fpu/softfloat-types.h"
#include "target/riscv/cpu_bits.h"
/*
 * The current MMU Modes are:
 *  - U                 0b000
 *  - S                 0b001
 *  - S+SUM             0b010
 *  - M                 0b011
 *  - U+2STAGE          0b100
 *  - S+2STAGE          0b101
 *  - S+SUM+2STAGE      0b110
 *  - Shadow stack+U    0b1000
 *  - Shadow stack+S    0b1001
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)
#define MMU_IDX_SS_WRITE    (1 << 3)
static inline int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    if (ret == MMUIdx_S_SUM) {
        ret = PRV_S;
    }
    return ret;
}

static inline bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}

static inline bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}
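
/*
 * Worked example: a two-stage S-mode access with SUM in effect uses
 * mmu_idx = MMUIdx_S_SUM | MMU_2STAGE_BIT = 0b110, for which
 * mmuidx_priv() returns PRV_S, mmuidx_sum() returns true and
 * mmuidx_2stage() returns true, matching the mode table above.
 */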

/* share data between vector helpers and decode code */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, VTA, 4, 1)
FIELD(VDATA, VTA_ALL_1S, 5, 1)
FIELD(VDATA, VMA, 6, 1)
FIELD(VDATA, NF, 7, 4)
FIELD(VDATA, WD, 7, 1)
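
/*
 * Sketch of the assumed usage (not a fixed API): decode packs these
 * fields into the helper descriptor with FIELD_DP32() from
 * hw/registerfields.h, and the vector helpers read them back with
 * FIELD_EX32(), along the lines of:
 *
 *   uint32_t data = 0;
 *   data = FIELD_DP32(data, VDATA, VM, a->vm);
 *   data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
 *   ...
 *   bool vm = FIELD_EX32(desc, VDATA, VM);
 *
 * NF and WD deliberately overlap at bit 7: they are consumed by
 * different instruction classes, so a descriptor never carries both.
 */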

/* floating-point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);
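
/*
 * Each helper returns the fclass result mask defined by the RISC-V
 * specification, with exactly one bit set:
 *   bit 0: negative infinity       bit 5: positive subnormal
 *   bit 1: negative normal         bit 6: positive normal
 *   bit 2: negative subnormal      bit 7: positive infinity
 *   bit 3: negative zero           bit 8: signaling NaN
 *   bit 4: positive zero           bit 9: quiet NaN
 */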

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif

enum {
    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
    RISCV_FRM_RDN = 2,  /* Round Down */
    RISCV_FRM_RUP = 3,  /* Round Up */
    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
    RISCV_FRM_ROD = 8,  /* Round to Odd */
};
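
/*
 * Note: frm encodings 5 and 6 are reserved by the specification, and
 * RISCV_FRM_DYN tells a helper to resolve the rounding mode from the
 * frm CSR at run time. RISCV_FRM_ROD has no architectural frm encoding
 * at all; it is only used internally for round-to-odd instructions
 * such as vfncvt.rod.f.f.w.
 */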

static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int32_t)f;
    } else {
        return f | MAKE_64BIT_MASK(32, 32);
    }
}

static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check is disabled when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint32_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(32, 32);

    if (likely((f & mask) == mask)) {
        return (uint32_t)f;
    } else {
        return 0x7fc00000u; /* default qnan */
    }
}
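
/*
 * Worked example: with zfinx disabled, nanbox_s(env, 0x3f800000), i.e.
 * 1.0f, yields 0xffffffff3f800000, and check_nanbox_s() recovers
 * 0x3f800000 from it. An improperly boxed input such as
 * 0x000000003f800000 fails the mask test and is squashed to the
 * default qNaN 0x7fc00000.
 */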

static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int16_t)f;
    } else {
        return f | MAKE_64BIT_MASK(16, 48);
    }
}

static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
    /* the NaN-boxing check is disabled when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7E00u; /* default qnan */
    }
}

#ifndef CONFIG_USER_ONLY
/* Our implementation of SysemuCPUOps::has_work */
bool riscv_cpu_has_work(CPUState *cs);
#endif

/* Zjpm (pointer masking) address adjustment routine */
static inline target_ulong adjust_addr_body(CPURISCVState *env,
                                            target_ulong addr,
                                            bool is_virt_addr)
{
    RISCVPmPmm pmm = PMM_FIELD_DISABLED;
    uint32_t pmlen = 0;
    bool signext = false;

    /* do nothing for rv32 mode */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return addr;
    }

    /* get the pmm field depending on whether addr is a virtual address */
    if (is_virt_addr) {
        pmm = riscv_pm_get_virt_pmm(env);
    } else {
        pmm = riscv_pm_get_pmm(env);
    }

    /* if pointer masking is disabled, return the original addr */
    if (pmm == PMM_FIELD_DISABLED) {
        return addr;
    }

    if (!is_virt_addr) {
        signext = riscv_cpu_virt_mem_enabled(env);
    }

    /* fetch pmlen before shifting, then mask off the top pmlen bits */
    pmlen = riscv_pm_get_pmlen(pmm);
    addr = addr << pmlen;

    /* sign- or zero-extend the masked address by pmlen bits */
    if (signext) {
        addr = (target_long)addr >> pmlen;
    } else {
        addr = addr >> pmlen;
    }

    return addr;
}
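
/*
 * Worked example (assuming pmlen == 16): for a tagged address carrying
 * a 16-bit tag in bits [63:48], the left shift discards the tag, and
 * the right shift then either replicates bit 47 (arithmetic shift,
 * when signext is set) or fills with zeroes (logical shift), so the
 * tag bits never reach address translation.
 */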

static inline target_ulong adjust_addr(CPURISCVState *env,
                                       target_ulong addr)
{
    return adjust_addr_body(env, addr, false);
}

static inline target_ulong adjust_addr_virt(CPURISCVState *env,
                                            target_ulong addr)
{
    return adjust_addr_body(env, addr, true);
}

static inline int insn_len(uint16_t first_word)
{
    return (first_word & 3) == 3 ? 4 : 2;
}
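
/*
 * Worked example: RISC-V encodes the instruction length in the lowest
 * two bits of the first halfword. For c.nop (0x0001), 0x0001 & 3 != 3,
 * so insn_len() returns 2; for nop (addi x0, x0, 0, whose first
 * halfword is 0x0013), 0x0013 & 3 == 3, so it returns 4.
 */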

#endif /* RISCV_CPU_INTERNALS_H */