xref: /qemu/target/riscv/internals.h (revision 513823e7521a09ed7ad1e32e6454bac3b2cbf52d)
/*
 * QEMU RISC-V CPU -- internal functions and types
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H

#include "exec/cpu-common.h"
#include "hw/registerfields.h"
#include "fpu/softfloat-types.h"
#include "target/riscv/cpu_bits.h"

/*
 * The current MMU Modes are:
 *  - U                 0b000
 *  - S                 0b001
 *  - S+SUM             0b010
 *  - M                 0b011
 *  - U+2STAGE          0b100
 *  - S+2STAGE          0b101
 *  - S+SUM+2STAGE      0b110
 *  - Shadow stack+U   0b1000
 *  - Shadow stack+S   0b1001
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)
#define MMU_IDX_SS_WRITE    (1 << 3)
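
/*
 * For illustration: an MMU index combines the privilege/SUM encoding in
 * bits [1:0] with MMU_2STAGE_BIT and MMU_IDX_SS_WRITE OR-ed on top, e.g.
 *
 *   MMUIdx_S_SUM | MMU_2STAGE_BIT  == 0b110   (S+SUM+2STAGE)
 *   MMUIdx_U | MMU_IDX_SS_WRITE    == 0b1000  (Shadow stack+U)
 */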

static inline int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    if (ret == MMUIdx_S_SUM) {
        ret = PRV_S;
    }
    return ret;
}

static inline bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}

static inline bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}
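
/*
 * A minimal sketch of how the helpers above decompose an MMU index
 * (illustrative only):
 *
 *   int idx = MMUIdx_S_SUM | MMU_2STAGE_BIT;     -- 0b110
 *   mmuidx_priv(idx)   -> PRV_S
 *   mmuidx_sum(idx)    -> true
 *   mmuidx_2stage(idx) -> true
 */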

/* share data between vector helpers and decode code */
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, VTA, 4, 1)
FIELD(VDATA, VTA_ALL_1S, 5, 1)
FIELD(VDATA, VMA, 6, 1)
FIELD(VDATA, NF, 7, 4)
FIELD(VDATA, WD, 7, 1)
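/*
 * Note: NF (bits 10:7) and WD (bit 7) overlap; presumably no single
 * instruction needs both fields, so the encodings can share space.
 */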

/* floating point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif

enum {
    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
    RISCV_FRM_RDN = 2,  /* Round Down */
    RISCV_FRM_RUP = 3,  /* Round Up */
    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
    RISCV_FRM_ROD = 8,  /* Round to Odd */
};
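
/*
 * Values 0-4 and 7 mirror the architectural 3-bit frm encodings; ROD (8)
 * does not fit in frm and appears to be an internal-only value for
 * operations that require round-to-odd.
 */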

static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int32_t)f;
    } else {
        return f | MAKE_64BIT_MASK(32, 32);
    }
}

static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
{
    /* Disable the NaN-boxing check when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint32_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(32, 32);

    if (likely((f & mask) == mask)) {
        return (uint32_t)f;
    } else {
        return 0x7fc00000u; /* default qnan */
    }
}
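
/*
 * For illustration, when zfinx is not enabled:
 *
 *   nanbox_s(env, 0x3f800000)               -> 0xffffffff3f800000
 *   check_nanbox_s(env, 0xffffffff3f800000) -> 0x3f800000
 *   check_nanbox_s(env, 0x000000003f800000) -> 0x7fc00000 (default qNaN)
 */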

static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
{
    /* the value is sign-extended instead of NaN-boxed for zfinx */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (int16_t)f;
    } else {
        return f | MAKE_64BIT_MASK(16, 48);
    }
}

static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
{
    /* Disable the NaN-boxing check when zfinx is enabled */
    if (env_archcpu(env)->cfg.ext_zfinx) {
        return (uint16_t)f;
    }

    uint64_t mask = MAKE_64BIT_MASK(16, 48);

    if (likely((f & mask) == mask)) {
        return (uint16_t)f;
    } else {
        return 0x7E00u; /* default qnan */
    }
}
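
/*
 * The half-precision pair behaves analogously, e.g. with zfinx disabled:
 *
 *   nanbox_h(env, 0x3c00)                   -> 0xffffffffffff3c00
 *   check_nanbox_h(env, 0x0000000000003c00) -> 0x7e00 (default qNaN)
 */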

/* Our implementation of CPUClass::has_work */
bool riscv_cpu_has_work(CPUState *cs);

/* Zjpm addr masking routine */
static inline target_ulong adjust_addr_body(CPURISCVState *env,
                                            target_ulong addr,
                                            bool is_virt_addr)
{
    RISCVPmPmm pmm = PMM_FIELD_DISABLED;
    uint32_t pmlen = 0;
    bool signext = false;

    /* do nothing for rv32 mode */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return addr;
    }

    /* get the pmm field depending on whether addr is a virtual address */
    if (is_virt_addr) {
        pmm = riscv_pm_get_virt_pmm(env);
    } else {
        pmm = riscv_pm_get_pmm(env);
    }

    /* if pointer masking is disabled, return original addr */
    if (pmm == PMM_FIELD_DISABLED) {
        return addr;
    }

    if (!is_virt_addr) {
        signext = riscv_cpu_virt_mem_enabled(env);
    }
    pmlen = riscv_pm_get_pmlen(pmm);
    addr = addr << pmlen;

    /* sign/zero extend the masked address back down by pmlen bits */
    if (signext) {
        addr = (target_long)addr >> pmlen;
    } else {
        addr = addr >> pmlen;
    }

    return addr;
}
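
/*
 * A worked example of the masking above (illustrative): assuming an
 * effective PMLEN of 16 on RV64 and addr = 0xfedcba9876543210,
 *
 *   zero-extending:  (addr << 16) >> 16               -> 0x0000ba9876543210
 *   sign-extending:  (target_long)(addr << 16) >> 16  -> 0xffffba9876543210
 */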

static inline target_ulong adjust_addr(CPURISCVState *env,
                                       target_ulong addr)
{
    return adjust_addr_body(env, addr, false);
}

static inline target_ulong adjust_addr_virt(CPURISCVState *env,
                                            target_ulong addr)
{
    return adjust_addr_body(env, addr, true);
}

#endif