1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/cputlb.h"
28 #include "exec/tb-flush.h"
29 #include "exec/icount.h"
30 #include "accel/tcg/getpc.h"
31 #include "qemu/guest-random.h"
32 #include "qapi/error.h"
33 #include "tcg/insn-start-words.h"
34 #include "internals.h"
35 #include <stdbool.h>
36
37 /* CSR function table public API */
riscv_get_csr_ops(int csrno,riscv_csr_operations * ops)38 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
39 {
40 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
41 }
42
riscv_set_csr_ops(int csrno,const riscv_csr_operations * ops)43 void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
44 {
45 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
46 }
47
48 /* Predicates */
49 #if !defined(CONFIG_USER_ONLY)
smstateen_acc_ok(CPURISCVState * env,int index,uint64_t bit)50 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
51 {
52 bool virt = env->virt_enabled;
53
54 if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
55 return RISCV_EXCP_NONE;
56 }
57
58 if (!(env->mstateen[index] & bit)) {
59 return RISCV_EXCP_ILLEGAL_INST;
60 }
61
62 if (virt) {
63 if (!(env->hstateen[index] & bit)) {
64 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
65 }
66
67 if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
68 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
69 }
70 }
71
72 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
73 if (!(env->sstateen[index] & bit)) {
74 return RISCV_EXCP_ILLEGAL_INST;
75 }
76 }
77
78 return RISCV_EXCP_NONE;
79 }
80 #endif
81
fs(CPURISCVState * env,int csrno)82 static RISCVException fs(CPURISCVState *env, int csrno)
83 {
84 #if !defined(CONFIG_USER_ONLY)
85 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
86 !riscv_cpu_cfg(env)->ext_zfinx) {
87 return RISCV_EXCP_ILLEGAL_INST;
88 }
89
90 if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
91 return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
92 }
93 #endif
94 return RISCV_EXCP_NONE;
95 }
96
vs(CPURISCVState * env,int csrno)97 static RISCVException vs(CPURISCVState *env, int csrno)
98 {
99 if (riscv_cpu_cfg(env)->ext_zve32x) {
100 #if !defined(CONFIG_USER_ONLY)
101 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
102 return RISCV_EXCP_ILLEGAL_INST;
103 }
104 #endif
105 return RISCV_EXCP_NONE;
106 }
107 return RISCV_EXCP_ILLEGAL_INST;
108 }
109
ctr(CPURISCVState * env,int csrno)110 static RISCVException ctr(CPURISCVState *env, int csrno)
111 {
112 #if !defined(CONFIG_USER_ONLY)
113 RISCVCPU *cpu = env_archcpu(env);
114 int ctr_index;
115 target_ulong ctr_mask;
116 int base_csrno = CSR_CYCLE;
117 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
118
119 if (rv32 && csrno >= CSR_CYCLEH) {
120 /* Offset for RV32 hpmcounternh counters */
121 base_csrno += 0x80;
122 }
123 ctr_index = csrno - base_csrno;
124 ctr_mask = BIT(ctr_index);
125
126 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
127 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
128 if (!riscv_cpu_cfg(env)->ext_zicntr) {
129 return RISCV_EXCP_ILLEGAL_INST;
130 }
131
132 goto skip_ext_pmu_check;
133 }
134
135 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
136 /* No counter is enabled in PMU or the counter is out of range */
137 return RISCV_EXCP_ILLEGAL_INST;
138 }
139
140 skip_ext_pmu_check:
141
142 if (env->debugger) {
143 return RISCV_EXCP_NONE;
144 }
145
146 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
147 return RISCV_EXCP_ILLEGAL_INST;
148 }
149
150 if (env->virt_enabled) {
151 if (!get_field(env->hcounteren, ctr_mask) ||
152 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
153 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
154 }
155 }
156
157 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
158 !get_field(env->scounteren, ctr_mask)) {
159 return RISCV_EXCP_ILLEGAL_INST;
160 }
161
162 #endif
163 return RISCV_EXCP_NONE;
164 }
165
ctr32(CPURISCVState * env,int csrno)166 static RISCVException ctr32(CPURISCVState *env, int csrno)
167 {
168 if (riscv_cpu_mxl(env) != MXL_RV32) {
169 return RISCV_EXCP_ILLEGAL_INST;
170 }
171
172 return ctr(env, csrno);
173 }
174
zcmt(CPURISCVState * env,int csrno)175 static RISCVException zcmt(CPURISCVState *env, int csrno)
176 {
177 if (!riscv_cpu_cfg(env)->ext_zcmt) {
178 return RISCV_EXCP_ILLEGAL_INST;
179 }
180
181 #if !defined(CONFIG_USER_ONLY)
182 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
183 if (ret != RISCV_EXCP_NONE) {
184 return ret;
185 }
186 #endif
187
188 return RISCV_EXCP_NONE;
189 }
190
cfi_ss(CPURISCVState * env,int csrno)191 static RISCVException cfi_ss(CPURISCVState *env, int csrno)
192 {
193 if (!env_archcpu(env)->cfg.ext_zicfiss) {
194 return RISCV_EXCP_ILLEGAL_INST;
195 }
196
197 /* If ext implemented, M-mode always have access to SSP CSR */
198 if (env->priv == PRV_M) {
199 return RISCV_EXCP_NONE;
200 }
201
202 /* if bcfi not active for current env, access to csr is illegal */
203 if (!cpu_get_bcfien(env)) {
204 #if !defined(CONFIG_USER_ONLY)
205 if (env->debugger) {
206 return RISCV_EXCP_NONE;
207 }
208 #endif
209 return RISCV_EXCP_ILLEGAL_INST;
210 }
211
212 return RISCV_EXCP_NONE;
213 }
214
215 #if !defined(CONFIG_USER_ONLY)
mctr(CPURISCVState * env,int csrno)216 static RISCVException mctr(CPURISCVState *env, int csrno)
217 {
218 RISCVCPU *cpu = env_archcpu(env);
219 uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
220 int ctr_index;
221 int base_csrno = CSR_MHPMCOUNTER3;
222
223 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
224 /* Offset for RV32 mhpmcounternh counters */
225 csrno -= 0x80;
226 }
227
228 g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
229
230 ctr_index = csrno - base_csrno;
231 if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
232 /* The PMU is not enabled or counter is out of range */
233 return RISCV_EXCP_ILLEGAL_INST;
234 }
235
236 return RISCV_EXCP_NONE;
237 }
238
mctr32(CPURISCVState * env,int csrno)239 static RISCVException mctr32(CPURISCVState *env, int csrno)
240 {
241 if (riscv_cpu_mxl(env) != MXL_RV32) {
242 return RISCV_EXCP_ILLEGAL_INST;
243 }
244
245 return mctr(env, csrno);
246 }
247
sscofpmf(CPURISCVState * env,int csrno)248 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
249 {
250 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
251 return RISCV_EXCP_ILLEGAL_INST;
252 }
253
254 return RISCV_EXCP_NONE;
255 }
256
sscofpmf_32(CPURISCVState * env,int csrno)257 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
258 {
259 if (riscv_cpu_mxl(env) != MXL_RV32) {
260 return RISCV_EXCP_ILLEGAL_INST;
261 }
262
263 return sscofpmf(env, csrno);
264 }
265
smcntrpmf(CPURISCVState * env,int csrno)266 static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
267 {
268 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
269 return RISCV_EXCP_ILLEGAL_INST;
270 }
271
272 return RISCV_EXCP_NONE;
273 }
274
smcntrpmf_32(CPURISCVState * env,int csrno)275 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
276 {
277 if (riscv_cpu_mxl(env) != MXL_RV32) {
278 return RISCV_EXCP_ILLEGAL_INST;
279 }
280
281 return smcntrpmf(env, csrno);
282 }
283
any(CPURISCVState * env,int csrno)284 static RISCVException any(CPURISCVState *env, int csrno)
285 {
286 return RISCV_EXCP_NONE;
287 }
288
any32(CPURISCVState * env,int csrno)289 static RISCVException any32(CPURISCVState *env, int csrno)
290 {
291 if (riscv_cpu_mxl(env) != MXL_RV32) {
292 return RISCV_EXCP_ILLEGAL_INST;
293 }
294
295 return any(env, csrno);
296
297 }
298
aia_any(CPURISCVState * env,int csrno)299 static RISCVException aia_any(CPURISCVState *env, int csrno)
300 {
301 if (!riscv_cpu_cfg(env)->ext_smaia) {
302 return RISCV_EXCP_ILLEGAL_INST;
303 }
304
305 return any(env, csrno);
306 }
307
aia_any32(CPURISCVState * env,int csrno)308 static RISCVException aia_any32(CPURISCVState *env, int csrno)
309 {
310 if (!riscv_cpu_cfg(env)->ext_smaia) {
311 return RISCV_EXCP_ILLEGAL_INST;
312 }
313
314 return any32(env, csrno);
315 }
316
csrind_any(CPURISCVState * env,int csrno)317 static RISCVException csrind_any(CPURISCVState *env, int csrno)
318 {
319 if (!riscv_cpu_cfg(env)->ext_smcsrind) {
320 return RISCV_EXCP_ILLEGAL_INST;
321 }
322
323 return RISCV_EXCP_NONE;
324 }
325
csrind_or_aia_any(CPURISCVState * env,int csrno)326 static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
327 {
328 if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
329 return RISCV_EXCP_ILLEGAL_INST;
330 }
331
332 return any(env, csrno);
333 }
334
smode(CPURISCVState * env,int csrno)335 static RISCVException smode(CPURISCVState *env, int csrno)
336 {
337 if (riscv_has_ext(env, RVS)) {
338 return RISCV_EXCP_NONE;
339 }
340
341 return RISCV_EXCP_ILLEGAL_INST;
342 }
343
smode32(CPURISCVState * env,int csrno)344 static RISCVException smode32(CPURISCVState *env, int csrno)
345 {
346 if (riscv_cpu_mxl(env) != MXL_RV32) {
347 return RISCV_EXCP_ILLEGAL_INST;
348 }
349
350 return smode(env, csrno);
351 }
352
aia_smode(CPURISCVState * env,int csrno)353 static RISCVException aia_smode(CPURISCVState *env, int csrno)
354 {
355 int ret;
356
357 if (!riscv_cpu_cfg(env)->ext_ssaia) {
358 return RISCV_EXCP_ILLEGAL_INST;
359 }
360
361 if (csrno == CSR_STOPEI) {
362 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
363 } else {
364 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
365 }
366
367 if (ret != RISCV_EXCP_NONE) {
368 return ret;
369 }
370
371 return smode(env, csrno);
372 }
373
aia_smode32(CPURISCVState * env,int csrno)374 static RISCVException aia_smode32(CPURISCVState *env, int csrno)
375 {
376 int ret;
377
378 if (!riscv_cpu_cfg(env)->ext_ssaia) {
379 return RISCV_EXCP_ILLEGAL_INST;
380 }
381
382 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
383 if (ret != RISCV_EXCP_NONE) {
384 return ret;
385 }
386
387 return smode32(env, csrno);
388 }
389
scountinhibit_pred(CPURISCVState * env,int csrno)390 static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
391 {
392 RISCVCPU *cpu = env_archcpu(env);
393
394 if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
395 return RISCV_EXCP_ILLEGAL_INST;
396 }
397
398 if (env->virt_enabled) {
399 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
400 }
401
402 return smode(env, csrno);
403 }
404
csrind_extensions_present(CPURISCVState * env)405 static bool csrind_extensions_present(CPURISCVState *env)
406 {
407 return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
408 }
409
aia_extensions_present(CPURISCVState * env)410 static bool aia_extensions_present(CPURISCVState *env)
411 {
412 return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
413 }
414
csrind_or_aia_extensions_present(CPURISCVState * env)415 static bool csrind_or_aia_extensions_present(CPURISCVState *env)
416 {
417 return csrind_extensions_present(env) || aia_extensions_present(env);
418 }
419
csrind_smode(CPURISCVState * env,int csrno)420 static RISCVException csrind_smode(CPURISCVState *env, int csrno)
421 {
422 if (!csrind_extensions_present(env)) {
423 return RISCV_EXCP_ILLEGAL_INST;
424 }
425
426 return smode(env, csrno);
427 }
428
csrind_or_aia_smode(CPURISCVState * env,int csrno)429 static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
430 {
431 if (!csrind_or_aia_extensions_present(env)) {
432 return RISCV_EXCP_ILLEGAL_INST;
433 }
434
435 return smode(env, csrno);
436 }
437
hmode(CPURISCVState * env,int csrno)438 static RISCVException hmode(CPURISCVState *env, int csrno)
439 {
440 if (riscv_has_ext(env, RVH)) {
441 return RISCV_EXCP_NONE;
442 }
443
444 return RISCV_EXCP_ILLEGAL_INST;
445 }
446
hmode32(CPURISCVState * env,int csrno)447 static RISCVException hmode32(CPURISCVState *env, int csrno)
448 {
449 if (riscv_cpu_mxl(env) != MXL_RV32) {
450 return RISCV_EXCP_ILLEGAL_INST;
451 }
452
453 return hmode(env, csrno);
454
455 }
456
csrind_hmode(CPURISCVState * env,int csrno)457 static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
458 {
459 if (!csrind_extensions_present(env)) {
460 return RISCV_EXCP_ILLEGAL_INST;
461 }
462
463 return hmode(env, csrno);
464 }
465
csrind_or_aia_hmode(CPURISCVState * env,int csrno)466 static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
467 {
468 if (!csrind_or_aia_extensions_present(env)) {
469 return RISCV_EXCP_ILLEGAL_INST;
470 }
471
472 return hmode(env, csrno);
473 }
474
umode(CPURISCVState * env,int csrno)475 static RISCVException umode(CPURISCVState *env, int csrno)
476 {
477 if (riscv_has_ext(env, RVU)) {
478 return RISCV_EXCP_NONE;
479 }
480
481 return RISCV_EXCP_ILLEGAL_INST;
482 }
483
umode32(CPURISCVState * env,int csrno)484 static RISCVException umode32(CPURISCVState *env, int csrno)
485 {
486 if (riscv_cpu_mxl(env) != MXL_RV32) {
487 return RISCV_EXCP_ILLEGAL_INST;
488 }
489
490 return umode(env, csrno);
491 }
492
mstateen(CPURISCVState * env,int csrno)493 static RISCVException mstateen(CPURISCVState *env, int csrno)
494 {
495 if (!riscv_cpu_cfg(env)->ext_smstateen) {
496 return RISCV_EXCP_ILLEGAL_INST;
497 }
498
499 return any(env, csrno);
500 }
501
hstateen_pred(CPURISCVState * env,int csrno,int base)502 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
503 {
504 if (!riscv_cpu_cfg(env)->ext_smstateen) {
505 return RISCV_EXCP_ILLEGAL_INST;
506 }
507
508 RISCVException ret = hmode(env, csrno);
509 if (ret != RISCV_EXCP_NONE) {
510 return ret;
511 }
512
513 if (env->debugger) {
514 return RISCV_EXCP_NONE;
515 }
516
517 if (env->priv < PRV_M) {
518 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
519 return RISCV_EXCP_ILLEGAL_INST;
520 }
521 }
522
523 return RISCV_EXCP_NONE;
524 }
525
hstateen(CPURISCVState * env,int csrno)526 static RISCVException hstateen(CPURISCVState *env, int csrno)
527 {
528 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
529 }
530
hstateenh(CPURISCVState * env,int csrno)531 static RISCVException hstateenh(CPURISCVState *env, int csrno)
532 {
533 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
534 }
535
sstateen(CPURISCVState * env,int csrno)536 static RISCVException sstateen(CPURISCVState *env, int csrno)
537 {
538 bool virt = env->virt_enabled;
539 int index = csrno - CSR_SSTATEEN0;
540
541 if (!riscv_cpu_cfg(env)->ext_smstateen) {
542 return RISCV_EXCP_ILLEGAL_INST;
543 }
544
545 RISCVException ret = smode(env, csrno);
546 if (ret != RISCV_EXCP_NONE) {
547 return ret;
548 }
549
550 if (env->debugger) {
551 return RISCV_EXCP_NONE;
552 }
553
554 if (env->priv < PRV_M) {
555 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
556 return RISCV_EXCP_ILLEGAL_INST;
557 }
558
559 if (virt) {
560 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
561 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
562 }
563 }
564 }
565
566 return RISCV_EXCP_NONE;
567 }
568
sstc(CPURISCVState * env,int csrno)569 static RISCVException sstc(CPURISCVState *env, int csrno)
570 {
571 bool hmode_check = false;
572
573 if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
574 return RISCV_EXCP_ILLEGAL_INST;
575 }
576
577 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
578 hmode_check = true;
579 }
580
581 RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
582 if (ret != RISCV_EXCP_NONE) {
583 return ret;
584 }
585
586 if (env->debugger) {
587 return RISCV_EXCP_NONE;
588 }
589
590 if (env->priv == PRV_M) {
591 return RISCV_EXCP_NONE;
592 }
593
594 /*
595 * No need of separate function for rv32 as menvcfg stores both menvcfg
596 * menvcfgh for RV32.
597 */
598 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
599 get_field(env->menvcfg, MENVCFG_STCE))) {
600 return RISCV_EXCP_ILLEGAL_INST;
601 }
602
603 if (env->virt_enabled) {
604 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
605 get_field(env->henvcfg, HENVCFG_STCE))) {
606 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
607 }
608 }
609
610 return RISCV_EXCP_NONE;
611 }
612
sstc_32(CPURISCVState * env,int csrno)613 static RISCVException sstc_32(CPURISCVState *env, int csrno)
614 {
615 if (riscv_cpu_mxl(env) != MXL_RV32) {
616 return RISCV_EXCP_ILLEGAL_INST;
617 }
618
619 return sstc(env, csrno);
620 }
621
satp(CPURISCVState * env,int csrno)622 static RISCVException satp(CPURISCVState *env, int csrno)
623 {
624 if (env->priv == PRV_S && !env->virt_enabled &&
625 get_field(env->mstatus, MSTATUS_TVM)) {
626 return RISCV_EXCP_ILLEGAL_INST;
627 }
628 if (env->priv == PRV_S && env->virt_enabled &&
629 get_field(env->hstatus, HSTATUS_VTVM)) {
630 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
631 }
632
633 return smode(env, csrno);
634 }
635
hgatp(CPURISCVState * env,int csrno)636 static RISCVException hgatp(CPURISCVState *env, int csrno)
637 {
638 if (env->priv == PRV_S && !env->virt_enabled &&
639 get_field(env->mstatus, MSTATUS_TVM)) {
640 return RISCV_EXCP_ILLEGAL_INST;
641 }
642
643 return hmode(env, csrno);
644 }
645
646 /*
647 * M-mode:
648 * Without ext_smctr raise illegal inst excep.
649 * Otherwise everything is accessible to m-mode.
650 *
651 * S-mode:
652 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
653 * Otherwise everything other than mctrctl is accessible.
654 *
655 * VS-mode:
656 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
657 * Without hstateen.ctr raise virtual illegal inst excep.
658 * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
659 * Always raise illegal instruction exception for sctrdepth.
660 */
ctr_mmode(CPURISCVState * env,int csrno)661 static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
662 {
663 /* Check if smctr-ext is present */
664 if (riscv_cpu_cfg(env)->ext_smctr) {
665 return RISCV_EXCP_NONE;
666 }
667
668 return RISCV_EXCP_ILLEGAL_INST;
669 }
670
ctr_smode(CPURISCVState * env,int csrno)671 static RISCVException ctr_smode(CPURISCVState *env, int csrno)
672 {
673 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
674
675 if (!cfg->ext_smctr && !cfg->ext_ssctr) {
676 return RISCV_EXCP_ILLEGAL_INST;
677 }
678
679 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
680 if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
681 env->virt_enabled) {
682 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
683 }
684
685 return ret;
686 }
687
aia_hmode(CPURISCVState * env,int csrno)688 static RISCVException aia_hmode(CPURISCVState *env, int csrno)
689 {
690 int ret;
691
692 if (!riscv_cpu_cfg(env)->ext_ssaia) {
693 return RISCV_EXCP_ILLEGAL_INST;
694 }
695
696 if (csrno == CSR_VSTOPEI) {
697 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
698 } else {
699 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
700 }
701
702 if (ret != RISCV_EXCP_NONE) {
703 return ret;
704 }
705
706 return hmode(env, csrno);
707 }
708
aia_hmode32(CPURISCVState * env,int csrno)709 static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
710 {
711 int ret;
712
713 if (!riscv_cpu_cfg(env)->ext_ssaia) {
714 return RISCV_EXCP_ILLEGAL_INST;
715 }
716
717 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
718 if (ret != RISCV_EXCP_NONE) {
719 return ret;
720 }
721
722 if (!riscv_cpu_cfg(env)->ext_ssaia) {
723 return RISCV_EXCP_ILLEGAL_INST;
724 }
725
726 return hmode32(env, csrno);
727 }
728
dbltrp_hmode(CPURISCVState * env,int csrno)729 static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
730 {
731 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
732 return RISCV_EXCP_NONE;
733 }
734
735 return hmode(env, csrno);
736 }
737
pmp(CPURISCVState * env,int csrno)738 static RISCVException pmp(CPURISCVState *env, int csrno)
739 {
740 if (riscv_cpu_cfg(env)->pmp) {
741 int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
742 + CSR_PMPCFG15 : CSR_PMPCFG3;
743
744 if (csrno <= max_pmpcfg) {
745 uint32_t reg_index = csrno - CSR_PMPCFG0;
746
747 /* TODO: RV128 restriction check */
748 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
749 return RISCV_EXCP_ILLEGAL_INST;
750 }
751 }
752
753 return RISCV_EXCP_NONE;
754 }
755
756 return RISCV_EXCP_ILLEGAL_INST;
757 }
758
have_mseccfg(CPURISCVState * env,int csrno)759 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
760 {
761 if (riscv_cpu_cfg(env)->ext_smepmp) {
762 return RISCV_EXCP_NONE;
763 }
764 if (riscv_cpu_cfg(env)->ext_zkr) {
765 return RISCV_EXCP_NONE;
766 }
767 if (riscv_cpu_cfg(env)->ext_smmpm) {
768 return RISCV_EXCP_NONE;
769 }
770
771 return RISCV_EXCP_ILLEGAL_INST;
772 }
773
debug(CPURISCVState * env,int csrno)774 static RISCVException debug(CPURISCVState *env, int csrno)
775 {
776 if (riscv_cpu_cfg(env)->debug) {
777 return RISCV_EXCP_NONE;
778 }
779
780 return RISCV_EXCP_ILLEGAL_INST;
781 }
782
rnmi(CPURISCVState * env,int csrno)783 static RISCVException rnmi(CPURISCVState *env, int csrno)
784 {
785 RISCVCPU *cpu = env_archcpu(env);
786
787 if (cpu->cfg.ext_smrnmi) {
788 return RISCV_EXCP_NONE;
789 }
790
791 return RISCV_EXCP_ILLEGAL_INST;
792 }
793 #endif
794
seed(CPURISCVState * env,int csrno)795 static RISCVException seed(CPURISCVState *env, int csrno)
796 {
797 if (!riscv_cpu_cfg(env)->ext_zkr) {
798 return RISCV_EXCP_ILLEGAL_INST;
799 }
800
801 #if !defined(CONFIG_USER_ONLY)
802 if (env->debugger) {
803 return RISCV_EXCP_NONE;
804 }
805
806 /*
807 * With a CSR read-write instruction:
808 * 1) The seed CSR is always available in machine mode as normal.
809 * 2) Attempted access to seed from virtual modes VS and VU always raises
810 * an exception(virtual instruction exception only if mseccfg.sseed=1).
811 * 3) Without the corresponding access control bit set to 1, any attempted
812 * access to seed from U, S or HS modes will raise an illegal instruction
813 * exception.
814 */
815 if (env->priv == PRV_M) {
816 return RISCV_EXCP_NONE;
817 } else if (env->virt_enabled) {
818 if (env->mseccfg & MSECCFG_SSEED) {
819 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
820 } else {
821 return RISCV_EXCP_ILLEGAL_INST;
822 }
823 } else {
824 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
825 return RISCV_EXCP_NONE;
826 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
827 return RISCV_EXCP_NONE;
828 } else {
829 return RISCV_EXCP_ILLEGAL_INST;
830 }
831 }
832 #else
833 return RISCV_EXCP_NONE;
834 #endif
835 }
836
837 /* zicfiss CSR_SSP read and write */
read_ssp(CPURISCVState * env,int csrno,target_ulong * val)838 static RISCVException read_ssp(CPURISCVState *env, int csrno,
839 target_ulong *val)
840 {
841 *val = env->ssp;
842 return RISCV_EXCP_NONE;
843 }
844
write_ssp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)845 static RISCVException write_ssp(CPURISCVState *env, int csrno,
846 target_ulong val, uintptr_t ra)
847 {
848 env->ssp = val;
849 return RISCV_EXCP_NONE;
850 }
851
852 /* User Floating-Point CSRs */
read_fflags(CPURISCVState * env,int csrno,target_ulong * val)853 static RISCVException read_fflags(CPURISCVState *env, int csrno,
854 target_ulong *val)
855 {
856 *val = riscv_cpu_get_fflags(env);
857 return RISCV_EXCP_NONE;
858 }
859
write_fflags(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)860 static RISCVException write_fflags(CPURISCVState *env, int csrno,
861 target_ulong val, uintptr_t ra)
862 {
863 #if !defined(CONFIG_USER_ONLY)
864 if (riscv_has_ext(env, RVF)) {
865 env->mstatus |= MSTATUS_FS;
866 }
867 #endif
868 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
869 return RISCV_EXCP_NONE;
870 }
871
read_frm(CPURISCVState * env,int csrno,target_ulong * val)872 static RISCVException read_frm(CPURISCVState *env, int csrno,
873 target_ulong *val)
874 {
875 *val = env->frm;
876 return RISCV_EXCP_NONE;
877 }
878
write_frm(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)879 static RISCVException write_frm(CPURISCVState *env, int csrno,
880 target_ulong val, uintptr_t ra)
881 {
882 #if !defined(CONFIG_USER_ONLY)
883 if (riscv_has_ext(env, RVF)) {
884 env->mstatus |= MSTATUS_FS;
885 }
886 #endif
887 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
888 return RISCV_EXCP_NONE;
889 }
890
read_fcsr(CPURISCVState * env,int csrno,target_ulong * val)891 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
892 target_ulong *val)
893 {
894 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
895 | (env->frm << FSR_RD_SHIFT);
896 return RISCV_EXCP_NONE;
897 }
898
write_fcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)899 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
900 target_ulong val, uintptr_t ra)
901 {
902 #if !defined(CONFIG_USER_ONLY)
903 if (riscv_has_ext(env, RVF)) {
904 env->mstatus |= MSTATUS_FS;
905 }
906 #endif
907 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
908 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
909 return RISCV_EXCP_NONE;
910 }
911
read_vtype(CPURISCVState * env,int csrno,target_ulong * val)912 static RISCVException read_vtype(CPURISCVState *env, int csrno,
913 target_ulong *val)
914 {
915 uint64_t vill;
916 switch (env->xl) {
917 case MXL_RV32:
918 vill = (uint32_t)env->vill << 31;
919 break;
920 case MXL_RV64:
921 vill = (uint64_t)env->vill << 63;
922 break;
923 default:
924 g_assert_not_reached();
925 }
926 *val = (target_ulong)vill | env->vtype;
927 return RISCV_EXCP_NONE;
928 }
929
read_vl(CPURISCVState * env,int csrno,target_ulong * val)930 static RISCVException read_vl(CPURISCVState *env, int csrno,
931 target_ulong *val)
932 {
933 *val = env->vl;
934 return RISCV_EXCP_NONE;
935 }
936
read_vlenb(CPURISCVState * env,int csrno,target_ulong * val)937 static RISCVException read_vlenb(CPURISCVState *env, int csrno,
938 target_ulong *val)
939 {
940 *val = riscv_cpu_cfg(env)->vlenb;
941 return RISCV_EXCP_NONE;
942 }
943
read_vxrm(CPURISCVState * env,int csrno,target_ulong * val)944 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
945 target_ulong *val)
946 {
947 *val = env->vxrm;
948 return RISCV_EXCP_NONE;
949 }
950
write_vxrm(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)951 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
952 target_ulong val, uintptr_t ra)
953 {
954 #if !defined(CONFIG_USER_ONLY)
955 env->mstatus |= MSTATUS_VS;
956 #endif
957 env->vxrm = val;
958 return RISCV_EXCP_NONE;
959 }
960
read_vxsat(CPURISCVState * env,int csrno,target_ulong * val)961 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
962 target_ulong *val)
963 {
964 *val = env->vxsat & BIT(0);
965 return RISCV_EXCP_NONE;
966 }
967
write_vxsat(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)968 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
969 target_ulong val, uintptr_t ra)
970 {
971 #if !defined(CONFIG_USER_ONLY)
972 env->mstatus |= MSTATUS_VS;
973 #endif
974 env->vxsat = val & BIT(0);
975 return RISCV_EXCP_NONE;
976 }
977
read_vstart(CPURISCVState * env,int csrno,target_ulong * val)978 static RISCVException read_vstart(CPURISCVState *env, int csrno,
979 target_ulong *val)
980 {
981 *val = env->vstart;
982 return RISCV_EXCP_NONE;
983 }
984
write_vstart(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)985 static RISCVException write_vstart(CPURISCVState *env, int csrno,
986 target_ulong val, uintptr_t ra)
987 {
988 #if !defined(CONFIG_USER_ONLY)
989 env->mstatus |= MSTATUS_VS;
990 #endif
991 /*
992 * The vstart CSR is defined to have only enough writable bits
993 * to hold the largest element index, i.e. lg2(VLEN) bits.
994 */
995 env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
996 return RISCV_EXCP_NONE;
997 }
998
read_vcsr(CPURISCVState * env,int csrno,target_ulong * val)999 static RISCVException read_vcsr(CPURISCVState *env, int csrno,
1000 target_ulong *val)
1001 {
1002 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
1003 return RISCV_EXCP_NONE;
1004 }
1005
write_vcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1006 static RISCVException write_vcsr(CPURISCVState *env, int csrno,
1007 target_ulong val, uintptr_t ra)
1008 {
1009 #if !defined(CONFIG_USER_ONLY)
1010 env->mstatus |= MSTATUS_VS;
1011 #endif
1012 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
1013 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
1014 return RISCV_EXCP_NONE;
1015 }
1016
1017 #if defined(CONFIG_USER_ONLY)
1018 /* User Timers and Counters */
get_ticks(bool shift)1019 static target_ulong get_ticks(bool shift)
1020 {
1021 int64_t val = cpu_get_host_ticks();
1022 target_ulong result = shift ? val >> 32 : val;
1023
1024 return result;
1025 }
1026
read_time(CPURISCVState * env,int csrno,target_ulong * val)1027 static RISCVException read_time(CPURISCVState *env, int csrno,
1028 target_ulong *val)
1029 {
1030 *val = cpu_get_host_ticks();
1031 return RISCV_EXCP_NONE;
1032 }
1033
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1034 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1035 target_ulong *val)
1036 {
1037 *val = cpu_get_host_ticks() >> 32;
1038 return RISCV_EXCP_NONE;
1039 }
1040
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1041 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1042 target_ulong *val)
1043 {
1044 *val = get_ticks(false);
1045 return RISCV_EXCP_NONE;
1046 }
1047
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1048 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1049 target_ulong *val)
1050 {
1051 *val = get_ticks(true);
1052 return RISCV_EXCP_NONE;
1053 }
1054
1055 #else /* CONFIG_USER_ONLY */
1056
read_mcyclecfg(CPURISCVState * env,int csrno,target_ulong * val)1057 static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
1058 target_ulong *val)
1059 {
1060 *val = env->mcyclecfg;
1061 return RISCV_EXCP_NONE;
1062 }
1063
write_mcyclecfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1064 static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
1065 target_ulong val, uintptr_t ra)
1066 {
1067 uint64_t inh_avail_mask;
1068
1069 if (riscv_cpu_mxl(env) == MXL_RV32) {
1070 env->mcyclecfg = val;
1071 } else {
1072 /* Set xINH fields if priv mode supported */
1073 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
1074 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
1075 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
1076 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1077 riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
1078 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1079 riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
1080 env->mcyclecfg = val & inh_avail_mask;
1081 }
1082
1083 return RISCV_EXCP_NONE;
1084 }
1085
read_mcyclecfgh(CPURISCVState * env,int csrno,target_ulong * val)1086 static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
1087 target_ulong *val)
1088 {
1089 *val = env->mcyclecfgh;
1090 return RISCV_EXCP_NONE;
1091 }
1092
write_mcyclecfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1093 static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
1094 target_ulong val, uintptr_t ra)
1095 {
1096 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
1097 MCYCLECFGH_BIT_MINH);
1098
1099 /* Set xINH fields if priv mode supported */
1100 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
1101 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
1102 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1103 riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
1104 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1105 riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;
1106
1107 env->mcyclecfgh = val & inh_avail_mask;
1108 return RISCV_EXCP_NONE;
1109 }
1110
read_minstretcfg(CPURISCVState * env,int csrno,target_ulong * val)1111 static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
1112 target_ulong *val)
1113 {
1114 *val = env->minstretcfg;
1115 return RISCV_EXCP_NONE;
1116 }
1117
write_minstretcfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1118 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
1119 target_ulong val, uintptr_t ra)
1120 {
1121 uint64_t inh_avail_mask;
1122
1123 if (riscv_cpu_mxl(env) == MXL_RV32) {
1124 env->minstretcfg = val;
1125 } else {
1126 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
1127 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
1128 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
1129 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1130 riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
1131 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1132 riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
1133 env->minstretcfg = val & inh_avail_mask;
1134 }
1135 return RISCV_EXCP_NONE;
1136 }
1137
read_minstretcfgh(CPURISCVState * env,int csrno,target_ulong * val)1138 static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
1139 target_ulong *val)
1140 {
1141 *val = env->minstretcfgh;
1142 return RISCV_EXCP_NONE;
1143 }
1144
write_minstretcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1145 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
1146 target_ulong val, uintptr_t ra)
1147 {
1148 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
1149 MINSTRETCFGH_BIT_MINH);
1150
1151 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
1152 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
1153 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1154 riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
1155 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1156 riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
1157
1158 env->minstretcfgh = val & inh_avail_mask;
1159 return RISCV_EXCP_NONE;
1160 }
1161
read_mhpmevent(CPURISCVState * env,int csrno,target_ulong * val)1162 static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
1163 target_ulong *val)
1164 {
1165 int evt_index = csrno - CSR_MCOUNTINHIBIT;
1166
1167 *val = env->mhpmevent_val[evt_index];
1168
1169 return RISCV_EXCP_NONE;
1170 }
1171
/*
 * Write an mhpmevent CSR and refresh the PMU event map.
 *
 * On RV32 the raw low word is stored and merged with the previously
 * written high half so the event map always sees the 64-bit value.
 * On RV64 the filter bits are first reduced to MINH plus the xINH bits
 * whose privilege modes are actually implemented (Smcntrpmf WARL).
 */
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mhpmevent_val[evt_index] = val;
        /* Fold in the high half for the event-map update below. */
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    } else {
        /* Set xINH fields only if the matching priv mode is supported. */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1199
read_mhpmeventh(CPURISCVState * env,int csrno,target_ulong * val)1200 static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
1201 target_ulong *val)
1202 {
1203 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
1204
1205 *val = env->mhpmeventh_val[evt_index];
1206
1207 return RISCV_EXCP_NONE;
1208 }
1209
/*
 * Write an mhpmeventXh CSR (RV32 high half), legalizing the Smcntrpmf
 * inhibit bits against the implemented privilege modes, then refresh
 * the PMU event map with the combined 64-bit event value.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MHPMEVENTH_BIT_MINH);

    /* Set xINH fields only if the matching priv mode is supported. */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    /* Combine with the low half so the event map sees the full value. */
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1234
/*
 * Compute the current value of a fixed-function counter (cycle at index
 * 0, instret at index 2, or an hpmcounter monitoring one of those
 * events), honouring the Smcntrpmf privilege-mode inhibit bits.
 *
 * When no filter bits are set, fall back to the raw global tick source.
 * Otherwise sum the per-privilege-mode buckets that are not inhibited.
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    /* Pick the configuration register matching this counter index. */
    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    /* No filtering requested: use the raw tick/instruction source. */
    if (!cfg_val) {
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    /*
     * Sum the buckets of all privilege modes that are not inhibited.
     * MCYCLECFG_BIT_* is used for every counter here; this relies on the
     * xINH bit positions being identical across the cfg registers --
     * NOTE(review): confirm against the Smcntrpmf bit layout.
     */
    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

done:
    /* RV32 reads one 32-bit half; RV64 returns the whole value. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}
1301
/*
 * Store a new value into counter ctr_idx and re-baseline it.
 *
 * If the counter is running (not inhibited and monitoring cycles or
 * instructions), snapshot the fixed-counter source as the new "prev"
 * reference, and for hpmcounters (> 2) re-arm the overflow timer with
 * the full 64-bit value.  Otherwise the written value itself becomes
 * the reference so the counter can keep incrementing from it.
 */
static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
                                          uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                        ctr_idx, false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                /* On RV32 include the separately written high half. */
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1328
/*
 * Store a new value into the RV32 high half of counter ctr_idx,
 * mirroring riscv_pmu_write_ctr(): re-baseline the "prev" snapshot if
 * the counter is running, and re-arm the overflow timer for
 * hpmcounters with the combined 64-bit value.
 */
static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
                                           uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    /* Combine with the low half already held in the counter state. */
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                         ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Inhibited counters keep incrementing from the written value. */
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1352
write_mhpmcounter(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1353 static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
1354 target_ulong val, uintptr_t ra)
1355 {
1356 int ctr_idx = csrno - CSR_MCYCLE;
1357
1358 return riscv_pmu_write_ctr(env, val, ctr_idx);
1359 }
1360
write_mhpmcounterh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1361 static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
1362 target_ulong val, uintptr_t ra)
1363 {
1364 int ctr_idx = csrno - CSR_MCYCLEH;
1365
1366 return riscv_pmu_write_ctrh(env, val, ctr_idx);
1367 }
1368
/*
 * Read counter ctr_idx (low or high half).  For running fixed-function
 * counters the value is reconstructed as:
 *     current_source - prev_snapshot + value_written_by_guest
 * so guest-visible deltas stay consistent with what it last wrote.
 */
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        /* Non-fixed events: report the last written value unchanged. */
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
1401
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1402 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1403 target_ulong *val)
1404 {
1405 uint16_t ctr_index;
1406
1407 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1408 ctr_index = csrno - CSR_MCYCLE;
1409 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1410 ctr_index = csrno - CSR_CYCLE;
1411 } else {
1412 return RISCV_EXCP_ILLEGAL_INST;
1413 }
1414
1415 return riscv_pmu_read_ctr(env, val, false, ctr_index);
1416 }
1417
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1418 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1419 target_ulong *val)
1420 {
1421 uint16_t ctr_index;
1422
1423 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1424 ctr_index = csrno - CSR_MCYCLEH;
1425 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1426 ctr_index = csrno - CSR_CYCLEH;
1427 } else {
1428 return RISCV_EXCP_ILLEGAL_INST;
1429 }
1430
1431 return riscv_pmu_read_ctr(env, val, true, ctr_index);
1432 }
1433
static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
                              target_ulong *val, target_ulong new_val,
                              target_ulong wr_mask)
{
    /* The delegation path only supports whole-register accesses. */
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (wr_mask) {
        riscv_pmu_write_ctr(env, new_val, ctr_idx);
        return 0;
    }
    if (val) {
        riscv_pmu_read_ctr(env, val, false, ctr_idx);
        return 0;
    }

    /* A read with no destination is meaningless. */
    return -EINVAL;
}
1452
static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
                               target_ulong *val, target_ulong new_val,
                               target_ulong wr_mask)
{
    /* The delegation path only supports whole-register accesses. */
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (wr_mask) {
        riscv_pmu_write_ctrh(env, new_val, ctr_idx);
        return 0;
    }
    if (val) {
        riscv_pmu_read_ctr(env, val, true, ctr_idx);
        return 0;
    }

    /* A read with no destination is meaningless. */
    return -EINVAL;
}
1471
/*
 * Read-modify-write an mhpmevent CSR via counter delegation.  Only
 * whole-register accesses are allowed.  Reads hide the MINH bit when
 * Sscofpmf is implemented; writes always preserve the stored MINH bit.
 */
static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
                            target_ulong *val, target_ulong new_val,
                            target_ulong wr_mask)
{
    uint64_t mhpmevt_val = new_val;

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmevent_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            *val &= ~MHPMEVENT_BIT_MINH;
        }
    } else if (wr_mask) {
        /* MINH is not writable through the delegated view. */
        wr_mask &= ~MHPMEVENT_BIT_MINH;
        mhpmevt_val = (new_val & wr_mask) |
                      (env->mhpmevent_val[evt_index] & ~wr_mask);
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            /* Merge the RV32 high half before updating the event map. */
            mhpmevt_val = mhpmevt_val |
                          ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
        }
        env->mhpmevent_val[evt_index] = mhpmevt_val;
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}
1503
/*
 * Read-modify-write the RV32 high half of an mhpmevent CSR via counter
 * delegation.  Mirrors rmw_cd_mhpmevent(): whole-register accesses
 * only, MINH hidden from reads (with Sscofpmf) and preserved on writes.
 */
static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
                             target_ulong *val, target_ulong new_val,
                             target_ulong wr_mask)
{
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmeventh_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            *val &= ~MHPMEVENTH_BIT_MINH;
        }
    } else if (wr_mask) {
        /* MINH is not writable through the delegated view. */
        wr_mask &= ~MHPMEVENTH_BIT_MINH;
        env->mhpmeventh_val[evt_index] =
            (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
        mhpmevth_val = env->mhpmeventh_val[evt_index];
        /* Update the event map with the combined 64-bit value. */
        mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}
1533
/*
 * Read-modify-write mcyclecfg/minstretcfg via counter delegation.
 * Writes cannot set MINH through the delegated view; reads report the
 * register with MINH hidden.
 *
 * Fixes two defects in the read path: it used "&=", permanently
 * clearing a bit in the architectural register on a read, and it
 * masked MHPMEVENTH_BIT_MINH (the high-half bit position) instead of
 * the register's own MINH bit.
 */
static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
                          target_ulong new_val, target_ulong wr_mask)
{
    switch (cfg_index) {
    case 0:         /* CYCLECFG */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFG_BIT_MINH;
            env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
        } else {
            /* Non-destructive read with MINH masked out. */
            *val = env->mcyclecfg & ~MCYCLECFG_BIT_MINH;
        }
        break;
    case 2:         /* INSTRETCFG */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFG_BIT_MINH;
            env->minstretcfg = (new_val & wr_mask) |
                               (env->minstretcfg & ~wr_mask);
        } else {
            /* Non-destructive read with MINH masked out. */
            *val = env->minstretcfg & ~MINSTRETCFG_BIT_MINH;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1560
/*
 * Read-modify-write mcyclecfgh/minstretcfgh via counter delegation.
 * Only valid on RV32; writes cannot set the MINH bit.
 *
 * NOTE(review): on non-RV32 this returns RISCV_EXCP_ILLEGAL_INST while
 * every other path in these rmw_cd_* helpers uses 0/-EINVAL -- confirm
 * callers treat any non-zero return uniformly.
 */
static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
                           target_ulong new_val, target_ulong wr_mask)
{

    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    switch (cfg_index) {
    case 0:         /* CYCLECFGH */
        if (wr_mask) {
            /* MINH is not writable through the delegated view. */
            wr_mask &= ~MCYCLECFGH_BIT_MINH;
            env->mcyclecfgh = (new_val & wr_mask) |
                              (env->mcyclecfgh & ~wr_mask);
        } else {
            *val = env->mcyclecfgh;
        }
        break;
    case 2:         /* INSTRETCFGH */
        if (wr_mask) {
            /* MINH is not writable through the delegated view. */
            wr_mask &= ~MINSTRETCFGH_BIT_MINH;
            env->minstretcfgh = (new_val & wr_mask) |
                                (env->minstretcfgh & ~wr_mask);
        } else {
            *val = env->minstretcfgh;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1593
1594
/*
 * Read scountovf (Sscofpmf): one bit per hpmcounter whose event has the
 * overflow (OF) flag set, gated by the corresponding mcounteren bit.
 * On RV32 the OF flag lives in the mhpmeventXh half.
 */
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    /* Virtualize scountovf for counter delegation */
    if (riscv_cpu_cfg(env)->ext_sscofpmf &&
        riscv_cpu_cfg(env)->ext_ssccfg &&
        get_field(env->menvcfg, MENVCFG_CDE) &&
        env->virt_enabled) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    /* Expose only counters the current mcounteren makes accessible. */
    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
1629
read_time(CPURISCVState * env,int csrno,target_ulong * val)1630 static RISCVException read_time(CPURISCVState *env, int csrno,
1631 target_ulong *val)
1632 {
1633 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1634
1635 if (!env->rdtime_fn) {
1636 return RISCV_EXCP_ILLEGAL_INST;
1637 }
1638
1639 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1640 return RISCV_EXCP_NONE;
1641 }
1642
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1643 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1644 target_ulong *val)
1645 {
1646 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1647
1648 if (!env->rdtime_fn) {
1649 return RISCV_EXCP_ILLEGAL_INST;
1650 }
1651
1652 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1653 return RISCV_EXCP_NONE;
1654 }
1655
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1656 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1657 target_ulong *val)
1658 {
1659 *val = env->vstimecmp;
1660
1661 return RISCV_EXCP_NONE;
1662 }
1663
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1664 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1665 target_ulong *val)
1666 {
1667 *val = env->vstimecmp >> 32;
1668
1669 return RISCV_EXCP_NONE;
1670 }
1671
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1672 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1673 target_ulong val, uintptr_t ra)
1674 {
1675 if (riscv_cpu_mxl(env) == MXL_RV32) {
1676 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1677 } else {
1678 env->vstimecmp = val;
1679 }
1680
1681 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1682 env->htimedelta, MIP_VSTIP);
1683
1684 return RISCV_EXCP_NONE;
1685 }
1686
write_vstimecmph(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1687 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1688 target_ulong val, uintptr_t ra)
1689 {
1690 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1691 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1692 env->htimedelta, MIP_VSTIP);
1693
1694 return RISCV_EXCP_NONE;
1695 }
1696
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1697 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1698 target_ulong *val)
1699 {
1700 if (env->virt_enabled) {
1701 *val = env->vstimecmp;
1702 } else {
1703 *val = env->stimecmp;
1704 }
1705
1706 return RISCV_EXCP_NONE;
1707 }
1708
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1709 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1710 target_ulong *val)
1711 {
1712 if (env->virt_enabled) {
1713 *val = env->vstimecmp >> 32;
1714 } else {
1715 *val = env->stimecmp >> 32;
1716 }
1717
1718 return RISCV_EXCP_NONE;
1719 }
1720
write_stimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1721 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1722 target_ulong val, uintptr_t ra)
1723 {
1724 if (env->virt_enabled) {
1725 if (env->hvictl & HVICTL_VTI) {
1726 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1727 }
1728 return write_vstimecmp(env, csrno, val, ra);
1729 }
1730
1731 if (riscv_cpu_mxl(env) == MXL_RV32) {
1732 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1733 } else {
1734 env->stimecmp = val;
1735 }
1736
1737 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1738
1739 return RISCV_EXCP_NONE;
1740 }
1741
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1742 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1743 target_ulong val, uintptr_t ra)
1744 {
1745 if (env->virt_enabled) {
1746 if (env->hvictl & HVICTL_VTI) {
1747 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1748 }
1749 return write_vstimecmph(env, csrno, val, ra);
1750 }
1751
1752 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1753 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1754
1755 return RISCV_EXCP_NONE;
1756 }
1757
#define VSTOPI_NUM_SRCS 5

/*
 * All core local interrupts except the fixed ones 0:12. This macro is for
 * virtual interrupts logic so please don't change this to avoid messing up
 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
 * VS level`.
 */
#define LOCAL_INTERRUPTS (~0x1FFFULL)

/* Interrupts M-mode may delegate downward (presumably the mideleg
 * write mask -- verify at use sites). */
static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
/* Interrupts HS-mode may delegate to VS-mode; LCOFIP is excluded. */
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
/* Every interrupt the implementation knows about. */
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
/* Synchronous exceptions that medeleg may delegate to S-mode. */
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_SW_CHECK)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
/*
 * Exceptions further delegable to VS-mode: hypervisor-specific faults
 * and non-U ecalls cannot be handled below HS-mode.
 */
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
/* Fields of mstatus visible through the sstatus window (priv 1.10). */
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1806
1807 /*
1808 * Spec allows for bits 13:63 to be either read-only or writable.
1809 * So far we have interrupt LCOFIP in that region which is writable.
1810 *
1811 * Also, spec allows to inject virtual interrupts in this region even
1812 * without any hardware interrupts for that interrupt number.
1813 *
1814 * For now interrupt in 13:63 region are all kept writable. 13 being
1815 * LCOFIP and 14:63 being virtual only. Change this in future if we
1816 * introduce more interrupts that are not writable.
1817 */
1818
1819 /* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
1820 static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
1821 LOCAL_INTERRUPTS;
1822 static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
1823 LOCAL_INTERRUPTS;
1824
1825 static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
1826 static const uint64_t hip_writable_mask = MIP_VSSIP;
1827 static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
1828 MIP_VSEIP | LOCAL_INTERRUPTS;
1829 static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
1830
1831 static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
1832
/* SATP.MODE encodings that are architecturally legal on RV32. */
const bool valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV32] = true
};

/* SATP.MODE encodings that are architecturally legal on RV64. */
const bool valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV39] = true,
    [VM_1_10_SV48] = true,
    [VM_1_10_SV57] = true
};
1844
1845 /* Machine Information Registers */
read_zero(CPURISCVState * env,int csrno,target_ulong * val)1846 static RISCVException read_zero(CPURISCVState *env, int csrno,
1847 target_ulong *val)
1848 {
1849 *val = 0;
1850 return RISCV_EXCP_NONE;
1851 }
1852
write_ignore(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1853 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1854 target_ulong val, uintptr_t ra)
1855 {
1856 return RISCV_EXCP_NONE;
1857 }
1858
read_mvendorid(CPURISCVState * env,int csrno,target_ulong * val)1859 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1860 target_ulong *val)
1861 {
1862 *val = riscv_cpu_cfg(env)->mvendorid;
1863 return RISCV_EXCP_NONE;
1864 }
1865
read_marchid(CPURISCVState * env,int csrno,target_ulong * val)1866 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1867 target_ulong *val)
1868 {
1869 *val = riscv_cpu_cfg(env)->marchid;
1870 return RISCV_EXCP_NONE;
1871 }
1872
read_mimpid(CPURISCVState * env,int csrno,target_ulong * val)1873 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1874 target_ulong *val)
1875 {
1876 *val = riscv_cpu_cfg(env)->mimpid;
1877 return RISCV_EXCP_NONE;
1878 }
1879
read_mhartid(CPURISCVState * env,int csrno,target_ulong * val)1880 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1881 target_ulong *val)
1882 {
1883 *val = env->mhartid;
1884 return RISCV_EXCP_NONE;
1885 }
1886
1887 /* Machine Trap Setup */
1888
1889 /* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    /*
     * SD is never stored; it summarizes whether any of FS/VS/XS is in
     * the Dirty state and is synthesized here on each read.
     */
    bool dirty = (status & MSTATUS_FS) == MSTATUS_FS ||
                 (status & MSTATUS_VS) == MSTATUS_VS ||
                 (status & MSTATUS_XS) == MSTATUS_XS;

    if (!dirty) {
        return status;
    }

    switch (xl) {
    case MXL_RV32:
        return status | MSTATUS32_SD;
    case MXL_RV64:
        return status | MSTATUS64_SD;
    case MXL_RV128:
        /* RV128 reports SD alone in the upper status word. */
        return MSTATUSH128_SD;
    default:
        g_assert_not_reached();
    }
}
1908
read_mstatus(CPURISCVState * env,int csrno,target_ulong * val)1909 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1910 target_ulong *val)
1911 {
1912 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1913 return RISCV_EXCP_NONE;
1914 }
1915
validate_vm(CPURISCVState * env,target_ulong vm)1916 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1917 {
1918 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
1919 RISCVCPU *cpu = env_archcpu(env);
1920 int satp_mode_supported_max = cpu->cfg.max_satp_mode;
1921 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
1922
1923 assert(satp_mode_supported_max >= 0);
1924 return vm <= satp_mode_supported_max && valid_vm[vm];
1925 }
1926
/*
 * Legalize a write to an address-translation CSR (satp-style register):
 * a WARL write with an unsupported MODE keeps the old register value;
 * an accepted write that changes any live field flushes the TLB.
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    target_ulong mask;
    bool vm;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance. Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        return val;
    }
    /* Rejected or no-op write: register is unchanged (WARL). */
    return old_xatp;
}
1952
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);
    bool valid;

    /* M is always implemented; S and U depend on the misa extensions. */
    if (new_mpp == PRV_M) {
        valid = true;
    } else if (new_mpp == PRV_S) {
        valid = riscv_has_ext(env, RVS);
    } else if (new_mpp == PRV_U) {
        valid = riscv_has_ext(env, RVU);
    } else {
        valid = false;
    }

    /* WARL: keep the previous MPP when an unsupported mode is written. */
    if (!valid) {
        val = set_field(val, MSTATUS_MPP, old_mpp);
    }

    return val;
}
1978
/*
 * Write mstatus: legalize MPP, build the WARL write mask from the
 * implemented extensions, apply double-trap (Ssdbltrp/Smdbltrp) side
 * effects, and flush the TLB when a translation-affecting bit flips.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }
    if (riscv_has_ext(env, RVV)) {
        mask |= MSTATUS_VS;
    }

    /* Ssdbltrp: setting SDT forcibly clears SIE. */
    if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
        mask |= MSTATUS_SDT;
        if ((val & MSTATUS_SDT) != 0) {
            val &= ~MSTATUS_SIE;
        }
    }

    /* Smdbltrp: setting MDT forcibly clears MIE. */
    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((val & MSTATUS_MDT) != 0) {
            val &= ~MSTATUS_MIE;
        }
    }

    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        /* UXL is WARL: accept the write only when a non-zero value is given */
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    /* If cfi lp extension is available, then apply cfi lp mask */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    return RISCV_EXCP_NONE;
}
2050
read_mstatush(CPURISCVState * env,int csrno,target_ulong * val)2051 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
2052 target_ulong *val)
2053 {
2054 *val = env->mstatus >> 32;
2055 return RISCV_EXCP_NONE;
2056 }
2057
/*
 * Write the upper 32 bits of mstatus (RV32 alias).
 *
 * Writable bits: MPV/GVA when the H extension is present, plus MDT
 * with Smdbltrp.
 */
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;

    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((valh & MSTATUS_MDT) != 0) {
            /*
             * Setting MDT must also clear MIE.  valh is the written
             * value shifted up by 32, so all of its low bits -- MIE
             * included -- are zero; widening the writable mask to
             * cover MIE therefore forces MIE to 0 in the merge below.
             */
            mask |= MSTATUS_MIE;
        }
    }
    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
2074
read_mstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2075 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
2076 Int128 *val)
2077 {
2078 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
2079 env->mstatus));
2080 return RISCV_EXCP_NONE;
2081 }
2082
read_misa_i128(CPURISCVState * env,int csrno,Int128 * val)2083 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
2084 Int128 *val)
2085 {
2086 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
2087 return RISCV_EXCP_NONE;
2088 }
2089
read_misa(CPURISCVState * env,int csrno,target_ulong * val)2090 static RISCVException read_misa(CPURISCVState *env, int csrno,
2091 target_ulong *val)
2092 {
2093 target_ulong misa;
2094
2095 switch (env->misa_mxl) {
2096 case MXL_RV32:
2097 misa = (target_ulong)MXL_RV32 << 30;
2098 break;
2099 #ifdef TARGET_RISCV64
2100 case MXL_RV64:
2101 misa = (target_ulong)MXL_RV64 << 62;
2102 break;
2103 #endif
2104 default:
2105 g_assert_not_reached();
2106 }
2107
2108 *val = misa | env->misa_ext;
2109 return RISCV_EXCP_NONE;
2110 }
2111
get_next_pc(CPURISCVState * env,uintptr_t ra)2112 static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
2113 {
2114 uint64_t data[INSN_START_WORDS];
2115
2116 /* Outside of a running cpu, env contains the next pc. */
2117 if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
2118 return env->pc;
2119 }
2120
2121 /* Within unwind data, [0] is pc and [1] is the opcode. */
2122 return data[0] + insn_len(data[1]);
2123 }
2124
/*
 * Write misa: runtime toggling of ISA extensions (WARL).
 *
 * Writes are silently dropped unless the 'misa_w' CPU option is set.
 * The written value is legalized first (unsupported bits masked, 'C'
 * suppressed at a misaligned next pc, 'G' cleared when any of IMAFD
 * is cleared) and then validated; on validation failure the previous
 * misa_ext is restored and the write is ignored.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /*
     * Suppress 'C' if next instruction is not aligned: disabling RVC
     * there would leave the hart unable to fetch the next insn.
     */
    if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    /* Dropping F also drops the FS state tracking in mstatus. */
    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache: generated code depends on misa_ext */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
2178
read_medeleg(CPURISCVState * env,int csrno,target_ulong * val)2179 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
2180 target_ulong *val)
2181 {
2182 *val = env->medeleg;
2183 return RISCV_EXCP_NONE;
2184 }
2185
write_medeleg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2186 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
2187 target_ulong val, uintptr_t ra)
2188 {
2189 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
2190 return RISCV_EXCP_NONE;
2191 }
2192
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2193 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
2194 uint64_t *ret_val,
2195 uint64_t new_val, uint64_t wr_mask)
2196 {
2197 uint64_t mask = wr_mask & delegable_ints;
2198
2199 if (ret_val) {
2200 *ret_val = env->mideleg;
2201 }
2202
2203 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
2204
2205 if (riscv_has_ext(env, RVH)) {
2206 env->mideleg |= HS_MODE_INTERRUPTS;
2207 }
2208
2209 return RISCV_EXCP_NONE;
2210 }
2211
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2212 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
2213 target_ulong *ret_val,
2214 target_ulong new_val, target_ulong wr_mask)
2215 {
2216 uint64_t rval;
2217 RISCVException ret;
2218
2219 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
2220 if (ret_val) {
2221 *ret_val = rval;
2222 }
2223
2224 return ret;
2225 }
2226
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2227 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
2228 target_ulong *ret_val,
2229 target_ulong new_val,
2230 target_ulong wr_mask)
2231 {
2232 uint64_t rval;
2233 RISCVException ret;
2234
2235 ret = rmw_mideleg64(env, csrno, &rval,
2236 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2237 if (ret_val) {
2238 *ret_val = rval >> 32;
2239 }
2240
2241 return ret;
2242 }
2243
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2244 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
2245 uint64_t *ret_val,
2246 uint64_t new_val, uint64_t wr_mask)
2247 {
2248 uint64_t mask = wr_mask & all_ints;
2249
2250 if (ret_val) {
2251 *ret_val = env->mie;
2252 }
2253
2254 env->mie = (env->mie & ~mask) | (new_val & mask);
2255
2256 if (!riscv_has_ext(env, RVH)) {
2257 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
2258 }
2259
2260 return RISCV_EXCP_NONE;
2261 }
2262
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2263 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
2264 target_ulong *ret_val,
2265 target_ulong new_val, target_ulong wr_mask)
2266 {
2267 uint64_t rval;
2268 RISCVException ret;
2269
2270 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
2271 if (ret_val) {
2272 *ret_val = rval;
2273 }
2274
2275 return ret;
2276 }
2277
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2278 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
2279 target_ulong *ret_val,
2280 target_ulong new_val, target_ulong wr_mask)
2281 {
2282 uint64_t rval;
2283 RISCVException ret;
2284
2285 ret = rmw_mie64(env, csrno, &rval,
2286 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2287 if (ret_val) {
2288 *ret_val = rval >> 32;
2289 }
2290
2291 return ret;
2292 }
2293
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2294 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
2295 uint64_t *ret_val,
2296 uint64_t new_val, uint64_t wr_mask)
2297 {
2298 uint64_t mask = wr_mask & mvien_writable_mask;
2299
2300 if (ret_val) {
2301 *ret_val = env->mvien;
2302 }
2303
2304 env->mvien = (env->mvien & ~mask) | (new_val & mask);
2305
2306 return RISCV_EXCP_NONE;
2307 }
2308
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2309 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
2310 target_ulong *ret_val,
2311 target_ulong new_val, target_ulong wr_mask)
2312 {
2313 uint64_t rval;
2314 RISCVException ret;
2315
2316 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
2317 if (ret_val) {
2318 *ret_val = rval;
2319 }
2320
2321 return ret;
2322 }
2323
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2324 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
2325 target_ulong *ret_val,
2326 target_ulong new_val, target_ulong wr_mask)
2327 {
2328 uint64_t rval;
2329 RISCVException ret;
2330
2331 ret = rmw_mvien64(env, csrno, &rval,
2332 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2333 if (ret_val) {
2334 *ret_val = rval >> 32;
2335 }
2336
2337 return ret;
2338 }
2339
/*
 * Read mtopi: the highest-priority pending M-level interrupt, encoded
 * as (IID << TOPI_IID_SHIFT) | priority, or 0 when none is pending.
 */
static RISCVException read_mtopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            /*
             * A zero priority selects the default order; sources whose
             * default priority number is greater than IPRIO_DEFAULT_M
             * are reported with IPRIO_MMAXIPRIO instead.
             */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
2362
/*
 * Translate an AIA S-mode CSR number to its VS-mode alias when the
 * hart is executing with virtualization enabled.
 */
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    if (csrno == CSR_SISELECT) {
        return CSR_VSISELECT;
    }
    if (csrno == CSR_SIREG) {
        return CSR_VSIREG;
    }
    if (csrno == CSR_STOPEI) {
        return CSR_VSTOPEI;
    }
    return csrno;
}
2380
/*
 * Translate a csrind S-mode CSR number to its VS-mode alias when the
 * hart is executing with virtualization enabled.  The SIREG numbers
 * are listed explicitly because they are not contiguous.
 */
static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    switch (csrno) {
    case CSR_SIREG:
    case CSR_SIREG2:
    case CSR_SIREG3:
    case CSR_SIREG4:
    case CSR_SIREG5:
    case CSR_SIREG6:
        /* Each sireg alias maps to the vsireg alias at the same offset. */
        return CSR_VSIREG + (csrno - CSR_SIREG);
    case CSR_SISELECT:
        return CSR_VSISELECT;
    default:
        return csrno;
    }
}
2401
/*
 * Read-modify-write the [m|s|vs]iselect CSR.
 *
 * The old value is returned through @val when non-NULL; bits selected
 * by @wr_mask are then replaced with @new_val.  The writable bits are
 * limited to the Smcsrind/Sscsrind select mask when either extension
 * is present, otherwise to the AIA select mask.
 */
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
                                   target_ulong *val, target_ulong new_val,
                                   target_ulong wr_mask)
{
    target_ulong *iselect;
    int ret;

    /* Indirect-access CSRs are gated by the SVSLCT bit of mstateen0. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
        wr_mask &= ISELECT_MASK_SXCSRIND;
    } else {
        wr_mask &= ISELECT_MASK_AIA;
    }

    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}
2448
xiselect_aia_range(target_ulong isel)2449 static bool xiselect_aia_range(target_ulong isel)
2450 {
2451 return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
2452 (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
2453 }
2454
xiselect_cd_range(target_ulong isel)2455 static bool xiselect_cd_range(target_ulong isel)
2456 {
2457 return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
2458 }
2459
xiselect_ctr_range(int csrno,target_ulong isel)2460 static bool xiselect_ctr_range(int csrno, target_ulong isel)
2461 {
2462 /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
2463 return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
2464 csrno < CSR_MIREG;
2465 }
2466
/*
 * Read-modify-write one word of the packed interrupt-priority array.
 *
 * Each iselect word covers 'nirqs' priority bytes (4 per 32 bits of
 * XLEN); @iprio is the byte array backing the iprio registers and
 * @ext_irq_no names the external-interrupt byte that stays read-only
 * zero.  Returns 0 on success, -EINVAL on an invalid iselect value.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    /* On RV64, the odd-numbered iprio registers do not exist. */
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    /* First IRQ number covered by this register, and bytes per register. */
    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    /* Gather the current priorities into one register-wide value. */
    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}
2511
/*
 * Indirect read/modify/write of one CTR source-PC entry.
 *
 * CTR arrays are treated as circular buffers and TOS always points to next
 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
 * 0 is always the latest one, traversal is a bit different here. See the
 * below example.
 *
 * Depth = 16.
 *
 * idx    [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
 * TOS                                 H
 * entry   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8   7
 */
static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t idx;

    /* Entry greater than depth-1 is read-only zero */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    /* Walk backwards from the write pointer: entry 0 is the newest. */
    idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    idx = (idx - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_src[idx];
    }

    env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);

    return 0;
}
2550
/*
 * Indirect read/modify/write of one CTR target-PC entry.
 *
 * CTR arrays are treated as circular buffers and TOS always points to next
 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
 * 0 is always the latest one, traversal is a bit different here. See the
 * below example.
 *
 * Depth = 16.
 *
 * idx    [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
 * head                                H
 * entry   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8   7
 */
static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t idx;

    /* Entry greater than depth-1 is read-only zero */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    /* Walk backwards from the write pointer: entry 0 is the newest. */
    idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    idx = (idx - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_dst[idx];
    }

    env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);

    return 0;
}
2589
/*
 * Indirect read/modify/write of one CTR data entry.  Unlike the
 * source/target arrays, only the bits in CTRDATA_MASK are writable.
 *
 * CTR arrays are treated as circular buffers and TOS always points to next
 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
 * 0 is always the latest one, traversal is a bit different here. See the
 * below example.
 *
 * Depth = 16.
 *
 * idx    [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
 * head                                H
 * entry   6   5   4   3   2   1   0   F   E   D   C   B   A   9   8   7
 */
static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
                       target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t mask = wr_mask & CTRDATA_MASK;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t idx;

    /* Entry greater than depth-1 is read-only zero */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    /* Walk backwards from the write pointer: entry 0 is the newest. */
    idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    idx = (idx - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_data[idx];
    }

    env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);

    return 0;
}
2629
/*
 * AIA-range indirect register access via [m|s|vs]ireg.
 *
 * Dispatches an iselect value in the IPRIO or IMSIC window either to
 * the local priority array (rmw_iprio) or to the machine's IMSIC
 * emulation callback.  Failures become an illegal-instruction fault,
 * or a virtual-instruction fault for VS-mode accesses to registers
 * that exist but are currently inaccessible.
 */
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
                                    target_ulong isel, target_ulong *val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    bool virt = false, isel_reserved = false;
    int ret = -EINVAL;
    uint8_t *iprio;
    target_ulong priv, vgein;

    /* VS-mode CSR number passed in has already been translated */
    switch (csrno) {
    case CSR_MIREG:
        if (!riscv_cpu_cfg(env)->ext_smaia) {
            goto done;
        }
        iprio = env->miprio;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        /*
         * When SEIP is virtualized through mvien, S-mode may not reach
         * the IMSIC interrupt-file selects (EIDELIVERY..EIE63).
         */
        if (!riscv_cpu_cfg(env)->ext_ssaia ||
            (env->priv == PRV_S && env->mvien & MIP_SEIP &&
             env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
             env->siselect <= ISELECT_IMSIC_EIE63)) {
            goto done;
        }
        iprio = env->siprio;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        if (!riscv_cpu_cfg(env)->ext_ssaia) {
            goto done;
        }
        iprio = env->hviprio;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt, vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    } else {
        isel_reserved = true;
    }

done:
    /*
     * If AIA is not enabled, illegal instruction exception is always
     * returned regardless of whether we are in VS-mode or not
     */
    if (ret) {
        return (env->virt_enabled && virt && !isel_reserved) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
2709
/*
 * Counter-delegation (Smcdeleg/Ssccfg) indirect access through the
 * sireg* aliases: maps a CD-range iselect value onto the delegated
 * mhpmcounter/mhpmevent/config register selected by the alias.
 */
static int rmw_xireg_cd(CPURISCVState *env, int csrno,
                        target_ulong isel, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    int ctr_index = isel - ISELECT_CD_FIRST;
    int isel_hpm_start = ISELECT_CD_FIRST + 3;

    if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Invalid siselect value for reserved */
    if (ctr_index == 1) {
        goto done;
    }

    /* sireg4 and sireg5 provide access to RV32-only CSRs */
    if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
        (riscv_cpu_mxl(env) != MXL_RV32)) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Check Sscofpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
        (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
        goto done;
    }

    /* Check Smcntrpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
        (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
        (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
        goto done;
    }

    /* The counter must be delegated and counter delegation enabled. */
    if (!get_field(env->mcounteren, BIT(ctr_index)) ||
        !get_field(env->menvcfg, MENVCFG_CDE)) {
        goto done;
    }

    switch (csrno) {
    case CSR_SIREG:
        ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG4:
        ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG2:
        /* Indices 0-2 are the fixed counters' config registers. */
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    case CSR_SIREG5:
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    default:
        goto done;
    }

done:
    return ret;
}
2781
/*
 * CTR-range indirect access: route the sireg/vsireg alias to the
 * matching CTR entry array (source, target or data).
 */
static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
                         target_ulong isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
        return -EINVAL;
    }

    switch (csrno) {
    case CSR_SIREG:
    case CSR_VSIREG:
        return rmw_ctrsource(env, isel, val, new_val, wr_mask);
    case CSR_SIREG2:
    case CSR_VSIREG2:
        return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
    case CSR_SIREG3:
    case CSR_VSIREG3:
        return rmw_ctrdata(env, isel, val, new_val, wr_mask);
    default:
        /* Remaining aliases read as zero and ignore writes. */
        if (val) {
            *val = 0;
        }
        return 0;
    }
}
2802
/*
 * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
 *
 * Perform indirect access to xireg and xireg2-xireg6.
 * This is a generic interface for all xireg CSRs. Apart from AIA, all other
 * extensions using csrind should be implemented here.
 */
static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
                            target_ulong isel, target_ulong *val,
                            target_ulong new_val, target_ulong wr_mask)
{
    /* Only accesses that arrived via a vsireg alias are virtual. */
    bool virt = csrno == CSR_VSIREG ? true : false;
    int ret = -EINVAL;

    if (xiselect_cd_range(isel)) {
        ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
    } else if (xiselect_ctr_range(csrno, isel)) {
        ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
    } else {
        /*
         * As per the specification, access to an unimplemented region is
         * undefined, but the recommendation is to raise an illegal
         * instruction exception.
         */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
2836
/*
 * Common entry for the xireg2-xireg6 alias CSRs: pick the iselect
 * value matching the alias's privilege level, then forward to the
 * generic csrind handler.
 */
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    target_ulong isel;

    /* Indirect-access CSRs are gated by the SVSLCT bit of mstateen0. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /*
     * Each alias range has a hole at CSR_xIREG4 - 1 (the number between
     * xireg3 and xireg4, presumably unallocated -- see the CSR map);
     * that number is rejected explicitly.
     */
    if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
        csrno != CSR_MIREG4 - 1) {
        isel = env->miselect;
    } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
               csrno != CSR_SIREG4 - 1) {
        isel = env->siselect;
    } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
               csrno != CSR_VSIREG4 - 1) {
        isel = env->vsiselect;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
}
2866
/*
 * Read-modify-write the [m|s|vs]ireg CSR, dispatching on the current
 * xiselect value: AIA windows go to rmw_xireg_aia, everything else to
 * the generic csrind handler when Smcsrind/Sscsrind is present.
 */
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
                                target_ulong *val, target_ulong new_val,
                                target_ulong wr_mask)
{
    int ret = -EINVAL;
    target_ulong isel;

    /* Indirect-access CSRs are gated by the SVSLCT bit of mstateen0. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    switch (csrno) {
    case CSR_MIREG:
        isel = env->miselect;
        break;
    case CSR_SIREG:
        isel = env->siselect;
        break;
    case CSR_VSIREG:
        isel = env->vsiselect;
        break;
    default:
        goto done;
    };

    /*
     * Use the xiselect range to determine actual op on xireg.
     *
     * Since we only checked the existence of AIA or Indirect Access in the
     * predicate, we should check the existence of the exact extension when
     * we get to a specific range and return illegal instruction exception even
     * in VS-mode.
     */
    if (xiselect_aia_range(isel)) {
        return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
    } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
               riscv_cpu_cfg(env)->ext_sscsrind) {
        return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
    }

done:
    return RISCV_EXCP_ILLEGAL_INST;
}
2915
/*
 * Read-modify-write the [m|s|vs]topei CSR by forwarding to the
 * machine's IMSIC register emulation for ISELECT_IMSIC_TOPEI.
 * Failures raise an illegal-instruction fault, or a virtual-
 * instruction fault for inaccessible VS-mode registers.
 */
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
                                 target_ulong *val, target_ulong new_val,
                                 target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        /* With SEIP virtualized via mvien, S-mode may not use stopei. */
        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
            goto done;
        }
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                                   riscv_cpu_mxl_bits(env)),
                                     val, new_val, wr_mask);

done:
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
2973
read_mtvec(CPURISCVState * env,int csrno,target_ulong * val)2974 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2975 target_ulong *val)
2976 {
2977 *val = env->mtvec;
2978 return RISCV_EXCP_NONE;
2979 }
2980
write_mtvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2981 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2982 target_ulong val, uintptr_t ra)
2983 {
2984 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2985 if ((val & 3) < 2) {
2986 env->mtvec = val;
2987 } else {
2988 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2989 }
2990 return RISCV_EXCP_NONE;
2991 }
2992
read_mcountinhibit(CPURISCVState * env,int csrno,target_ulong * val)2993 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2994 target_ulong *val)
2995 {
2996 *val = env->mcountinhibit;
2997 return RISCV_EXCP_NONE;
2998 }
2999
/*
 * Write mcountinhibit (WARL): start/stop the selected counters.
 *
 * For each cycle/instret-monitoring counter whose inhibit bit changed:
 *  - when (re)enabled, snapshot the current fixed-counter value so
 *    later reads are relative to it, and re-arm the PMU timer for the
 *    programmable counters (cidx > 2);
 *  - when stopped, fold the elapsed delta into the saved counter
 *    value so reads while stopped stay frozen.
 */
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val, uintptr_t ra)
{
    int cidx;
    PMUCTRState *counter;
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
    target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
    uint64_t mhpmctr_val, prev_count, curr_count;

    /* WARL register - disable unavailable counters; TM bit is always 0 */
    env->mcountinhibit = val & present_ctrs;

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        if (!(updated_ctrs & BIT(cidx)) ||
            (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
             !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
            continue;
        }

        counter = &env->pmu_ctrs[cidx];

        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            /* Counter was just enabled: record the baseline. */
            counter->mhpmcounter_prev =
                riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_prev =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
            }

            if (cidx > 2) {
                mhpmctr_val = counter->mhpmcounter_val;
                if (riscv_cpu_mxl(env) == MXL_RV32) {
                    /* On RV32 the 64-bit count spans two registers. */
                    mhpmctr_val = mhpmctr_val |
                                  ((uint64_t)counter->mhpmcounterh_val << 32);
                }
                riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
            }
        } else {
            /* Counter was just stopped: fold in the elapsed delta. */
            curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);

            mhpmctr_val = counter->mhpmcounter_val;
            prev_count = counter->mhpmcounter_prev;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                uint64_t tmp =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);

                curr_count = curr_count | (tmp << 32);
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
                prev_count = prev_count |
                             ((uint64_t)counter->mhpmcounterh_prev << 32);
            }

            /* Adjust the counter for later reads. */
            mhpmctr_val = curr_count - prev_count + mhpmctr_val;
            counter->mhpmcounter_val = mhpmctr_val;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_val = mhpmctr_val >> 32;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
3066
/* scountinhibit: S-mode view of mcountinhibit, filtered by mcounteren. */
static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
                                         target_ulong *val)
{
    /* S-mode can only access the bits delegated by M-mode */
    *val = env->mcountinhibit & env->mcounteren;
    return RISCV_EXCP_NONE;
}
3074
/*
 * Write scountinhibit: only bits delegated through mcounteren may be
 * changed; the actual state lives in mcountinhibit.
 */
static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val, uintptr_t ra)
{
    return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
}
3080
/* Read mcounteren (counter-enable bits for the next-lower privilege). */
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}
3087
write_mcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3088 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
3089 target_ulong val, uintptr_t ra)
3090 {
3091 RISCVCPU *cpu = env_archcpu(env);
3092
3093 /* WARL register - disable unavailable counters */
3094 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3095 COUNTEREN_IR);
3096 return RISCV_EXCP_NONE;
3097 }
3098
3099 /* Machine Trap Handling */
/* RV128: mscratch is split across mscratch (low) and mscratchh (high). */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}
3106
/* RV128: store the two 64-bit halves of the written value. */
static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}
3114
/* Read mscratch (M-mode scratch register, no side effects). */
static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}
3121
/* Write mscratch (fully writable, no side effects). */
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    env->mscratch = val;
    return RISCV_EXCP_NONE;
}
3128
/* Read mepc, masking the low bit(s) per IALIGN (see get_xepc_mask). */
static RISCVException read_mepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mepc & get_xepc_mask(env);
    return RISCV_EXCP_NONE;
}
3135
/* Write mepc; illegal low bits are discarded at write time (WARL). */
static RISCVException write_mepc(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    env->mepc = val & get_xepc_mask(env);
    return RISCV_EXCP_NONE;
}
3142
/* Read mcause (trap cause, interrupt bit included). */
static RISCVException read_mcause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mcause;
    return RISCV_EXCP_NONE;
}
3149
/* Write mcause (stored verbatim; software may set any value). */
static RISCVException write_mcause(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mcause = val;
    return RISCV_EXCP_NONE;
}
3156
/* Read mtval (trap value: faulting address or instruction bits). */
static RISCVException read_mtval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtval;
    return RISCV_EXCP_NONE;
}
3163
/* Write mtval (stored verbatim). */
static RISCVException write_mtval(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->mtval = val;
    return RISCV_EXCP_NONE;
}
3170
3171 /* Execution environment configuration setup */
/* Read menvcfg (low XLEN bits of the 64-bit backing value). */
static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->menvcfg;
    return RISCV_EXCP_NONE;
}
3178
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra);
/*
 * Write menvcfg.  The writable mask is assembled from the enabled
 * extensions; the RV64-only fields (PBMTE/STCE/CDE/ADUE/DTE/PMM and the
 * Zicfilp/Zicfiss enables) sit in the upper half and are therefore not
 * writable through this path on RV32.  STCE transitions are forwarded to
 * the timer code, and henvcfg is re-written at the end so its fields that
 * are read-only-0 without the menvcfg enable stay consistent.
 */
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
                    MENVCFG_CBZE | MENVCFG_CDE;
    bool stce_changed = false;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
                (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
                (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);

        if (env_archcpu(env)->cfg.ext_zicfilp) {
            mask |= MENVCFG_LPE;
        }

        if (env_archcpu(env)->cfg.ext_zicfiss) {
            mask |= MENVCFG_SSE;
        }

        /* Update PMM field only if the value is valid according to Zjpm v1.0 */
        if (env_archcpu(env)->cfg.ext_smnpm &&
            get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
            mask |= MENVCFG_PMM;
        }

        /* Clearing DTE also clears the sticky mstatus.SDT state */
        if ((val & MENVCFG_DTE) == 0) {
            env->mstatus &= ~MSTATUS_SDT;
        }

        if (cfg->ext_sstc &&
            ((env->menvcfg & MENVCFG_STCE) != (val & MENVCFG_STCE))) {
            stce_changed = true;
        }
    }
    env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

    if (stce_changed) {
        riscv_timer_stce_changed(env, true, !!(val & MENVCFG_STCE));
    }

    /* henvcfg depends on menvcfg; recompute it with the new menvcfg */
    return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
3227
/* RV32: read the upper 32 bits of menvcfg. */
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->menvcfg >> 32;
    return RISCV_EXCP_NONE;
}
3234
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra);
/*
 * RV32: write the upper 32 bits of menvcfg.  All fields handled here
 * (PBMTE/STCE/ADUE/CDE/DTE) live in the high half; the written value is
 * shifted into position (valh) before masking.  STCE transitions are
 * forwarded to the timer code and henvcfgh is re-written afterwards,
 * mirroring write_menvcfg.
 */
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                    (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                    (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
                    (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
                    (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
    uint64_t valh = (uint64_t)val << 32;
    bool stce_changed = false;

    if (cfg->ext_sstc &&
        ((env->menvcfg & MENVCFG_STCE) != (valh & MENVCFG_STCE))) {
        stce_changed = true;
    }

    /* Clearing DTE also clears the sticky mstatus.SDT state */
    if ((valh & MENVCFG_DTE) == 0) {
        env->mstatus &= ~MSTATUS_SDT;
    }

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    if (stce_changed) {
        riscv_timer_stce_changed(env, true, !!(valh & MENVCFG_STCE));
    }

    return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
3266
read_senvcfg(CPURISCVState * env,int csrno,target_ulong * val)3267 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
3268 target_ulong *val)
3269 {
3270 RISCVException ret;
3271
3272 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3273 if (ret != RISCV_EXCP_NONE) {
3274 return ret;
3275 }
3276
3277 *val = env->senvcfg;
3278 return RISCV_EXCP_NONE;
3279 }
3280
/*
 * Write senvcfg.  The writable mask is built from the enabled extensions;
 * the Smstateen access check sits between the mask computations and must
 * stay there so a disallowed access faults before any state changes.
 */
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
    RISCVException ret;
    /* Update PMM field only if the value is valid according to Zjpm v1.0 */
    if (env_archcpu(env)->cfg.ext_ssnpm &&
        riscv_cpu_mxl(env) == MXL_RV64 &&
        get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
        mask |= SENVCFG_PMM;
    }

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= SENVCFG_LPE;
    }

    /* Higher mode SSE must be ON for next-less mode SSE to be ON */
    if (env_archcpu(env)->cfg.ext_zicfiss &&
        get_field(env->menvcfg, MENVCFG_SSE) &&
        (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
        mask |= SENVCFG_SSE;
    }

    if (env_archcpu(env)->cfg.ext_svukte) {
        mask |= SENVCFG_UKTE;
    }

    env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
    return RISCV_EXCP_NONE;
}
3316
/*
 * Read henvcfg, with fields that require the corresponding menvcfg
 * enable reading as zero when that enable is clear.
 */
static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /*
     * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
     * henvcfg.stce is read_only 0 when menvcfg.stce = 0
     * henvcfg.adue is read_only 0 when menvcfg.adue = 0
     * henvcfg.dte is read_only 0 when menvcfg.dte = 0
     */
    *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
                             HENVCFG_DTE) | env->menvcfg);
    return RISCV_EXCP_NONE;
}
3337
/*
 * Write henvcfg.  Writable bits are the base FIOM/CBIE/CBCFE/CBZE set
 * plus, on RV64, whatever upper-half enables menvcfg currently grants
 * (PBMTE/STCE/ADUE/DTE) and the extension-gated LPE/SSE/PMM fields.
 * Note the register is overwritten (val & mask), not merged, so bits
 * outside the mask are cleared.  STCE transitions are forwarded to the
 * timer code; clearing DTE also clears vsstatus.SDT.
 */
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
    RISCVException ret;
    bool stce_changed = false;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
                                HENVCFG_DTE);

        if (env_archcpu(env)->cfg.ext_zicfilp) {
            mask |= HENVCFG_LPE;
        }

        /* H can light up SSE for VS only if HS had it from menvcfg */
        if (env_archcpu(env)->cfg.ext_zicfiss &&
            get_field(env->menvcfg, MENVCFG_SSE)) {
            mask |= HENVCFG_SSE;
        }

        /* Update PMM field only if the value is valid according to Zjpm v1.0 */
        if (env_archcpu(env)->cfg.ext_ssnpm &&
            get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
            mask |= HENVCFG_PMM;
        }

        if (cfg->ext_sstc &&
            ((env->henvcfg & HENVCFG_STCE) != (val & HENVCFG_STCE))) {
            stce_changed = true;
        }
    }

    env->henvcfg = val & mask;
    if ((env->henvcfg & HENVCFG_DTE) == 0) {
        env->vsstatus &= ~MSTATUS_SDT;
    }

    if (stce_changed) {
        riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
    }

    return RISCV_EXCP_NONE;
}
3388
/*
 * RV32: read the upper 32 bits of henvcfg, applying the same
 * "read-only 0 unless enabled in menvcfg" rule as read_henvcfg.
 */
static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
                              HENVCFG_DTE) | env->menvcfg)) >> 32;
    return RISCV_EXCP_NONE;
}
3403
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3404 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3405 target_ulong val, uintptr_t ra)
3406 {
3407 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3408 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
3409 HENVCFG_ADUE | HENVCFG_DTE);
3410 uint64_t valh = (uint64_t)val << 32;
3411 RISCVException ret;
3412 bool stce_changed = false;
3413
3414 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3415 if (ret != RISCV_EXCP_NONE) {
3416 return ret;
3417 }
3418
3419 if (cfg->ext_sstc &&
3420 ((env->henvcfg & HENVCFG_STCE) != (valh & HENVCFG_STCE))) {
3421 stce_changed = true;
3422 }
3423
3424 env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
3425 if ((env->henvcfg & HENVCFG_DTE) == 0) {
3426 env->vsstatus &= ~MSTATUS_SDT;
3427 }
3428
3429 if (stce_changed) {
3430 riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
3431 }
3432
3433 return RISCV_EXCP_NONE;
3434 }
3435
/* Read mstateen0..3 (indexed by CSR number offset). */
static RISCVException read_mstateen(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstateen[csrno - CSR_MSTATEEN0];

    return RISCV_EXCP_NONE;
}
3443
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3444 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
3445 uint64_t wr_mask, target_ulong new_val)
3446 {
3447 uint64_t *reg;
3448
3449 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
3450 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3451
3452 return RISCV_EXCP_NONE;
3453 }
3454
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3455 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
3456 target_ulong new_val, uintptr_t ra)
3457 {
3458 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3459 if (!riscv_has_ext(env, RVF)) {
3460 wr_mask |= SMSTATEEN0_FCSR;
3461 }
3462
3463 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3464 wr_mask |= SMSTATEEN0_P1P13;
3465 }
3466
3467 if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
3468 wr_mask |= SMSTATEEN0_SVSLCT;
3469 }
3470
3471 /*
3472 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3473 * implemented. However, that information is with MachineState and we can't
3474 * figure that out in csr.c. Just enable if Smaia is available.
3475 */
3476 if (riscv_cpu_cfg(env)->ext_smaia) {
3477 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3478 }
3479
3480 if (riscv_cpu_cfg(env)->ext_ssctr) {
3481 wr_mask |= SMSTATEEN0_CTR;
3482 }
3483
3484 return write_mstateen(env, csrno, wr_mask, new_val);
3485 }
3486
/* Write mstateen1..3: only the top-level STATEEN bit is implemented. */
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
                                         target_ulong new_val, uintptr_t ra)
{
    return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
3492
/* RV32: read the upper 32 bits of mstateen0..3. */
static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;

    return RISCV_EXCP_NONE;
}
3500
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3501 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
3502 uint64_t wr_mask, target_ulong new_val)
3503 {
3504 uint64_t *reg, val;
3505
3506 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
3507 val = (uint64_t)new_val << 32;
3508 val |= *reg & 0xFFFFFFFF;
3509 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3510
3511 return RISCV_EXCP_NONE;
3512 }
3513
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3514 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
3515 target_ulong new_val, uintptr_t ra)
3516 {
3517 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3518
3519 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3520 wr_mask |= SMSTATEEN0_P1P13;
3521 }
3522
3523 if (riscv_cpu_cfg(env)->ext_ssctr) {
3524 wr_mask |= SMSTATEEN0_CTR;
3525 }
3526
3527 return write_mstateenh(env, csrno, wr_mask, new_val);
3528 }
3529
/* RV32: write mstateen1h..3h; only the STATEEN bit is implemented. */
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
                                          target_ulong new_val, uintptr_t ra)
{
    return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
3535
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)3536 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
3537 target_ulong *val)
3538 {
3539 int index = csrno - CSR_HSTATEEN0;
3540
3541 *val = env->hstateen[index] & env->mstateen[index];
3542
3543 return RISCV_EXCP_NONE;
3544 }
3545
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3546 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
3547 uint64_t mask, target_ulong new_val)
3548 {
3549 int index = csrno - CSR_HSTATEEN0;
3550 uint64_t *reg, wr_mask;
3551
3552 reg = &env->hstateen[index];
3553 wr_mask = env->mstateen[index] & mask;
3554 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3555
3556 return RISCV_EXCP_NONE;
3557 }
3558
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3559 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
3560 target_ulong new_val, uintptr_t ra)
3561 {
3562 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3563
3564 if (!riscv_has_ext(env, RVF)) {
3565 wr_mask |= SMSTATEEN0_FCSR;
3566 }
3567
3568 if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
3569 wr_mask |= SMSTATEEN0_SVSLCT;
3570 }
3571
3572 /*
3573 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3574 * implemented. However, that information is with MachineState and we can't
3575 * figure that out in csr.c. Just enable if Ssaia is available.
3576 */
3577 if (riscv_cpu_cfg(env)->ext_ssaia) {
3578 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3579 }
3580
3581 if (riscv_cpu_cfg(env)->ext_ssctr) {
3582 wr_mask |= SMSTATEEN0_CTR;
3583 }
3584
3585 return write_hstateen(env, csrno, wr_mask, new_val);
3586 }
3587
/* Write hstateen1..3: only the top-level STATEEN bit is implemented. */
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
                                         target_ulong new_val, uintptr_t ra)
{
    return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
3593
/* RV32: read the upper 32 bits of hstateen, gated by mstateen. */
static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int index = csrno - CSR_HSTATEEN0H;

    *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);

    return RISCV_EXCP_NONE;
}
3603
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3604 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
3605 uint64_t mask, target_ulong new_val)
3606 {
3607 int index = csrno - CSR_HSTATEEN0H;
3608 uint64_t *reg, wr_mask, val;
3609
3610 reg = &env->hstateen[index];
3611 val = (uint64_t)new_val << 32;
3612 val |= *reg & 0xFFFFFFFF;
3613 wr_mask = env->mstateen[index] & mask;
3614 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3615
3616 return RISCV_EXCP_NONE;
3617 }
3618
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3619 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
3620 target_ulong new_val, uintptr_t ra)
3621 {
3622 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3623
3624 if (riscv_cpu_cfg(env)->ext_ssctr) {
3625 wr_mask |= SMSTATEEN0_CTR;
3626 }
3627
3628 return write_hstateenh(env, csrno, wr_mask, new_val);
3629 }
3630
/* RV32: write hstateen1h..3h; only the STATEEN bit is implemented. */
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
                                          target_ulong new_val, uintptr_t ra)
{
    return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
3636
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)3637 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
3638 target_ulong *val)
3639 {
3640 bool virt = env->virt_enabled;
3641 int index = csrno - CSR_SSTATEEN0;
3642
3643 *val = env->sstateen[index] & env->mstateen[index];
3644 if (virt) {
3645 *val &= env->hstateen[index];
3646 }
3647
3648 return RISCV_EXCP_NONE;
3649 }
3650
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3651 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
3652 uint64_t mask, target_ulong new_val)
3653 {
3654 bool virt = env->virt_enabled;
3655 int index = csrno - CSR_SSTATEEN0;
3656 uint64_t wr_mask;
3657 uint64_t *reg;
3658
3659 wr_mask = env->mstateen[index] & mask;
3660 if (virt) {
3661 wr_mask &= env->hstateen[index];
3662 }
3663
3664 reg = &env->sstateen[index];
3665 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3666
3667 return RISCV_EXCP_NONE;
3668 }
3669
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3670 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
3671 target_ulong new_val, uintptr_t ra)
3672 {
3673 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3674
3675 if (!riscv_has_ext(env, RVF)) {
3676 wr_mask |= SMSTATEEN0_FCSR;
3677 }
3678
3679 return write_sstateen(env, csrno, wr_mask, new_val);
3680 }
3681
/* Write sstateen1..3: only the top-level STATEEN bit is implemented. */
static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
                                         target_ulong new_val, uintptr_t ra)
{
    return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
3687
/*
 * Read-modify-write the 64-bit view of mip.  Only delegable interrupts
 * are writable.  SEIP writes update the software-injected component;
 * the externally driven level (env->external_seip) keeps the bit set.
 * With Sstc enabled, STIP (and VSTIP when henvcfg.STCE is also set)
 * become read-only timer outputs.
 */
static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        /* external_seip is presumably 0/1 here -- multiplied into MIP_SEIP */
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */

        /* STIP is not writable when menvcfg.STCE is enabled. */
        mask = mask & ~MIP_STIP;

        /* VSTIP is not writable when both [mh]envcfg.STCE are enabled. */
        if (get_field(env->henvcfg, HENVCFG_STCE)) {
            mask = mask & ~MIP_VSTIP;
        }
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        /* VSEIP/VSTIP are sourced from hgeip/vstime for non-hvip reads */
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}
3731
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3732 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
3733 target_ulong *ret_val,
3734 target_ulong new_val, target_ulong wr_mask)
3735 {
3736 uint64_t rval;
3737 RISCVException ret;
3738
3739 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
3740 if (ret_val) {
3741 *ret_val = rval;
3742 }
3743
3744 return ret;
3745 }
3746
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3747 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
3748 target_ulong *ret_val,
3749 target_ulong new_val, target_ulong wr_mask)
3750 {
3751 uint64_t rval;
3752 RISCVException ret;
3753
3754 ret = rmw_mip64(env, csrno, &rval,
3755 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3756 if (ret_val) {
3757 *ret_val = rval >> 32;
3758 }
3759
3760 return ret;
3761 }
3762
/*
 * The function is written for two use-cases:
 * 1- To access mvip csr as is for m-mode access.
 * 2- To access sip as a combination of mip and mvip for s-mode.
 *
 * Both report bits 1, 5, 9 and 13:63 but with the exception of
 * STIP being read-only zero in case of mvip when sstc extension
 * is present.
 * Also, sip needs to be read-only zero when both mideleg[i] and
 * mvien[i] are zero but mvip needs to be an alias of mip.
 */
static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    target_ulong ret_mip = 0;
    RISCVException ret;
    uint64_t old_mvip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      No delegation. mvip[i] is alias of mip[i].
     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
     *                      mip[i].
     *
     *   So alias condition would be for bits:
     *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
     *          (!sstc & MIP_STIP)
     *
     *   Non-alias condition will be for bits:
     *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
     *
     *  alias_mask denotes the bits that come from mip nalias_mask denotes bits
     *  that come from hvip.
     */
    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (env->mideleg | ~env->mvien)) | MIP_STIP;
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t wr_mask_mvip;
    uint64_t wr_mask_mip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sip[i] read-only zero.
     *   0           1      sip[i] alias of mvip[i].
     *   1           X      sip[i] alias of mip[i].
     *
     *  Both alias and non-alias mask remain same for sip except for bits
     *  which are zero in both mideleg and mvien.
     */
    if (csrno == CSR_SIP) {
        /* Remove bits that are zero in both mideleg and mvien. */
        alias_mask &= (env->mideleg | env->mvien);
        nalias_mask &= (env->mideleg | env->mvien);
    }

    /*
     * If sstc is present, mvip.STIP is not an alias of mip.STIP, so clear
     * that bit in the value we return from mip.
     */
    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        alias_mask &= ~MIP_STIP;
    }

    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;

    /*
     * For bits set in alias_mask, mvip needs to be alias of mip, so forward
     * this to rmw_mip.
     */
    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    old_mvip = env->mvip;

    /*
     * Write to mvip. Update only non-alias bits. Alias bits were updated
     * in mip in rmw_mip above.
     */
    if (wr_mask_mvip) {
        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);

        /*
         * Given mvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Combine the aliased mip bits with the separately stored mvip bits */
        ret_mip &= alias_mask;
        old_mvip &= nalias_mask;

        *ret_val = old_mvip | ret_mip;
    }

    return RISCV_EXCP_NONE;
}
3868
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3869 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
3870 target_ulong *ret_val,
3871 target_ulong new_val, target_ulong wr_mask)
3872 {
3873 uint64_t rval;
3874 RISCVException ret;
3875
3876 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
3877 if (ret_val) {
3878 *ret_val = rval;
3879 }
3880
3881 return ret;
3882 }
3883
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3884 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
3885 target_ulong *ret_val,
3886 target_ulong new_val, target_ulong wr_mask)
3887 {
3888 uint64_t rval;
3889 RISCVException ret;
3890
3891 ret = rmw_mvip64(env, csrno, &rval,
3892 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3893 if (ret_val) {
3894 *ret_val = rval >> 32;
3895 }
3896
3897 return ret;
3898 }
3899
3900 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)3901 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
3902 Int128 *val)
3903 {
3904 uint64_t mask = sstatus_v1_10_mask;
3905 uint64_t sstatus = env->mstatus & mask;
3906 if (env->xl != MXL_RV32 || env->debugger) {
3907 mask |= SSTATUS64_UXL;
3908 }
3909 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3910 mask |= SSTATUS_SDT;
3911 }
3912
3913 if (env_archcpu(env)->cfg.ext_zicfilp) {
3914 mask |= SSTATUS_SPELP;
3915 }
3916
3917 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
3918 return RISCV_EXCP_NONE;
3919 }
3920
read_sstatus(CPURISCVState * env,int csrno,target_ulong * val)3921 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
3922 target_ulong *val)
3923 {
3924 target_ulong mask = (sstatus_v1_10_mask);
3925 if (env->xl != MXL_RV32 || env->debugger) {
3926 mask |= SSTATUS64_UXL;
3927 }
3928
3929 if (env_archcpu(env)->cfg.ext_zicfilp) {
3930 mask |= SSTATUS_SPELP;
3931 }
3932 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3933 mask |= SSTATUS_SDT;
3934 }
3935 /* TODO: Use SXL not MXL. */
3936 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
3937 return RISCV_EXCP_NONE;
3938 }
3939
write_sstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3940 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
3941 target_ulong val, uintptr_t ra)
3942 {
3943 target_ulong mask = (sstatus_v1_10_mask);
3944
3945 if (env->xl != MXL_RV32 || env->debugger) {
3946 if ((val & SSTATUS64_UXL) != 0) {
3947 mask |= SSTATUS64_UXL;
3948 }
3949 }
3950
3951 if (env_archcpu(env)->cfg.ext_zicfilp) {
3952 mask |= SSTATUS_SPELP;
3953 }
3954 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3955 mask |= SSTATUS_SDT;
3956 }
3957 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3958 return write_mstatus(env, CSR_MSTATUS, newval, ra);
3959 }
3960
/*
 * Read-modify-write vsie.  VS-level interrupt bits appear shifted down
 * by one in the guest view; bits delegated via hideleg alias mie, while
 * hvien-only bits are backed by the separate env->vsie storage.
 */
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
                            env->hideleg;
    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
    uint64_t rval, rval_vs, vsbits;
    uint64_t wr_mask_vsie;
    uint64_t wr_mask_mie;
    RISCVException ret;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;

    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    wr_mask_mie = wr_mask & alias_mask;
    wr_mask_vsie = wr_mask & nalias_mask;

    /* Aliased bits go to mie; hvien-only bits stay in env->vsie */
    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);

    rval_vs = env->vsie & nalias_mask;
    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);

    if (ret_val) {
        /* Shift VS-level bits back down for the guest-visible value */
        rval &= alias_mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1) | rval_vs;
    }

    return ret;
}
3999
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4000 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
4001 target_ulong *ret_val,
4002 target_ulong new_val, target_ulong wr_mask)
4003 {
4004 uint64_t rval;
4005 RISCVException ret;
4006
4007 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
4008 if (ret_val) {
4009 *ret_val = rval;
4010 }
4011
4012 return ret;
4013 }
4014
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4015 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
4016 target_ulong *ret_val,
4017 target_ulong new_val, target_ulong wr_mask)
4018 {
4019 uint64_t rval;
4020 RISCVException ret;
4021
4022 ret = rmw_vsie64(env, csrno, &rval,
4023 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4024 if (ret_val) {
4025 *ret_val = rval >> 32;
4026 }
4027
4028 return ret;
4029 }
4030
/*
 * 64-bit read-modify-write of sie.
 *
 * Bits delegated via mideleg alias mie; bits enabled only via mvien are
 * kept in the separate env->sie storage.  When V=1 the access is
 * redirected to vsie (or faults if hvictl.VTI forbids it).
 */
static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    /* Bits backed by env->sie itself: set in mvien but not delegated. */
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    /* Bits that alias mie: delegated to S-mode via mideleg. */
    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
    uint64_t sie_mask = wr_mask & nalias_mask;
    RISCVException ret;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sie[i] read-only zero.
     *   0           1      sie[i] is a separate writable bit.
     *   1           X      sie[i] alias of mie[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            /* hvictl.VTI: guest sie accesses must trap to the hypervisor. */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
        }
    } else {
        /* Aliased bits go through mie; separate bits live in env->sie. */
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
            *ret_val |= env->sie & nalias_mask;
        }

        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
    }

    return ret;
}
4070
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4071 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
4072 target_ulong *ret_val,
4073 target_ulong new_val, target_ulong wr_mask)
4074 {
4075 uint64_t rval;
4076 RISCVException ret;
4077
4078 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
4079 if (ret == RISCV_EXCP_NONE && ret_val) {
4080 *ret_val = rval;
4081 }
4082
4083 return ret;
4084 }
4085
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4086 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
4087 target_ulong *ret_val,
4088 target_ulong new_val, target_ulong wr_mask)
4089 {
4090 uint64_t rval;
4091 RISCVException ret;
4092
4093 ret = rmw_sie64(env, csrno, &rval,
4094 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4095 if (ret_val) {
4096 *ret_val = rval >> 32;
4097 }
4098
4099 return ret;
4100 }
4101
/* Read the supervisor trap vector base-address register (stvec). */
static RISCVException read_stvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stvec;
    return RISCV_EXCP_NONE;
}
4108
write_stvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4109 static RISCVException write_stvec(CPURISCVState *env, int csrno,
4110 target_ulong val, uintptr_t ra)
4111 {
4112 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
4113 if ((val & 3) < 2) {
4114 env->stvec = val;
4115 } else {
4116 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
4117 }
4118 return RISCV_EXCP_NONE;
4119 }
4120
/* Read the supervisor counter-enable register (scounteren). */
static RISCVException read_scounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->scounteren;
    return RISCV_EXCP_NONE;
}
4127
write_scounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4128 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
4129 target_ulong val, uintptr_t ra)
4130 {
4131 RISCVCPU *cpu = env_archcpu(env);
4132
4133 /* WARL register - disable unavailable counters */
4134 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4135 COUNTEREN_IR);
4136 return RISCV_EXCP_NONE;
4137 }
4138
4139 /* Supervisor Trap Handling */
/* Read sscratch as a 128-bit value (RV128: low/high halves combined). */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}
4146
/* Write sscratch as a 128-bit value (RV128: split into low/high halves). */
static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}
4154
/* Read the supervisor scratch register (sscratch). */
static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}
4161
/* Write the supervisor scratch register (sscratch). */
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}
4168
/* Read sepc, masked by the implemented xepc bits (see get_xepc_mask). */
static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc & get_xepc_mask(env);
    return RISCV_EXCP_NONE;
}
4175
/* Write sepc; unimplemented low bits are discarded via get_xepc_mask. */
static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    env->sepc = val & get_xepc_mask(env);
    return RISCV_EXCP_NONE;
}
4182
/* Read the supervisor trap cause register (scause). */
static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
}
4189
/* Write the supervisor trap cause register (scause). */
static RISCVException write_scause(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->scause = val;
    return RISCV_EXCP_NONE;
}
4196
/* Read the supervisor trap value register (stval). */
static RISCVException read_stval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stval;
    return RISCV_EXCP_NONE;
}
4203
/* Write the supervisor trap value register (stval). */
static RISCVException write_stval(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->stval = val;
    return RISCV_EXCP_NONE;
}
4210
4211 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
4212 uint64_t *ret_val,
4213 uint64_t new_val, uint64_t wr_mask);
4214
/*
 * 64-bit read-modify-write of vsip.
 *
 * The guest sees VS-level interrupts at the S-level bit positions, so
 * the incoming value/mask are shifted up by one before being applied to
 * the underlying hvip/mip state, and shifted back down for the read.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    /* Bits visible in vsip: VS interrupts delegated via hideleg... */
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        /* Shift VS bits back down to the guest-visible S positions. */
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}
4245
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4246 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
4247 target_ulong *ret_val,
4248 target_ulong new_val, target_ulong wr_mask)
4249 {
4250 uint64_t rval;
4251 RISCVException ret;
4252
4253 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
4254 if (ret_val) {
4255 *ret_val = rval;
4256 }
4257
4258 return ret;
4259 }
4260
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4261 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
4262 target_ulong *ret_val,
4263 target_ulong new_val, target_ulong wr_mask)
4264 {
4265 uint64_t rval;
4266 RISCVException ret;
4267
4268 ret = rmw_vsip64(env, csrno, &rval,
4269 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4270 if (ret_val) {
4271 *ret_val = rval >> 32;
4272 }
4273
4274 return ret;
4275 }
4276
/*
 * 64-bit read-modify-write of sip.  Redirects to vsip when V=1 (or
 * faults under hvictl.VTI); otherwise goes through mvip so that bits
 * delegated via mideleg alias mip while mvien-only bits are virtual.
 * The read value is restricted to bits visible to S-mode.
 */
static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            /* hvictl.VTI: guest sip accesses must trap to the hypervisor. */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Only bits delegated or virtualized into S-mode are visible. */
        *ret_val &= (env->mideleg | env->mvien) &
            (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}
4300
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4301 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
4302 target_ulong *ret_val,
4303 target_ulong new_val, target_ulong wr_mask)
4304 {
4305 uint64_t rval;
4306 RISCVException ret;
4307
4308 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
4309 if (ret_val) {
4310 *ret_val = rval;
4311 }
4312
4313 return ret;
4314 }
4315
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4316 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
4317 target_ulong *ret_val,
4318 target_ulong new_val, target_ulong wr_mask)
4319 {
4320 uint64_t rval;
4321 RISCVException ret;
4322
4323 ret = rmw_sip64(env, csrno, &rval,
4324 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4325 if (ret_val) {
4326 *ret_val = rval >> 32;
4327 }
4328
4329 return ret;
4330 }
4331
4332 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)4333 static RISCVException read_satp(CPURISCVState *env, int csrno,
4334 target_ulong *val)
4335 {
4336 if (!riscv_cpu_cfg(env)->mmu) {
4337 *val = 0;
4338 return RISCV_EXCP_NONE;
4339 }
4340 *val = env->satp;
4341 return RISCV_EXCP_NONE;
4342 }
4343
/*
 * Write satp.  Writes are ignored without an MMU; otherwise the value is
 * legalized (mode/ASID/PPN) by legalize_xatp() before being stored.
 */
static RISCVException write_satp(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    if (!riscv_cpu_cfg(env)->mmu) {
        return RISCV_EXCP_NONE;
    }

    env->satp = legalize_xatp(env, env->satp, val);
    return RISCV_EXCP_NONE;
}
4354
/*
 * Read-modify-write sctrdepth (Control Transfer Records depth).
 * The depth field is WARL: values above SCTRDEPTH_MAX are clamped, and
 * sctrstatus.WRPTR is re-legalized against the new depth.
 */
static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
                                    target_ulong *ret_val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    uint64_t mask = wr_mask & SCTRDEPTH_MASK;

    /* Old value is returned before the write takes effect. */
    if (ret_val) {
        *ret_val = env->sctrdepth;
    }

    env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);

    /* Correct depth. */
    if (mask) {
        uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);

        if (depth > SCTRDEPTH_MAX) {
            depth = SCTRDEPTH_MAX;
            env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
        }

        /* Update sctrstatus.WRPTR with a legal value */
        depth = 16ULL << depth;
        env->sctrstatus =
            env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
    }

    return RISCV_EXCP_NONE;
}
4384
/*
 * Read-modify-write sctrstatus.  After the write, WRPTR is clamped to
 * the entry count implied by the current sctrdepth.
 */
static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
                                     target_ulong *ret_val,
                                     target_ulong new_val, target_ulong wr_mask)
{
    /* Number of CTR entries: 16 << depth-field. */
    uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint32_t mask = wr_mask & SCTRSTATUS_MASK;

    /* Old value is returned before the write takes effect. */
    if (ret_val) {
        *ret_val = env->sctrstatus;
    }

    env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);

    /* Update sctrstatus.WRPTR with a legal value */
    env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));

    return RISCV_EXCP_NONE;
}
4403
/*
 * Common read-modify-write handler for mctrctl/sctrctl/vsctrctl.
 * Selects the backing field and legal-bit mask from the CSR number and
 * the current virtualization state.
 */
static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t csr_mask, mask = wr_mask;
    uint64_t *ctl_ptr = &env->mctrctl;

    if (csrno == CSR_MCTRCTL) {
        csr_mask = MCTRCTL_MASK;
    } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
        csr_mask = SCTRCTL_MASK;
    } else {
        /*
         * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
         * or csrno == CSR_VSCTRCTL.
         */
        csr_mask = VSCTRCTL_MASK;
        ctl_ptr = &env->vsctrctl;
    }

    mask &= csr_mask;

    /* Old value (restricted to legal bits) is returned before the write. */
    if (ret_val) {
        *ret_val = *ctl_ptr & csr_mask;
    }

    *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}
4434
/*
 * Read vstopi (top VS-level interrupt).  Collects candidate interrupts
 * (external interrupt via VGEIN/IMSIC or hvictl, plus the highest
 * pending non-external VS interrupt), picks the lowest-priority-number
 * winner and encodes {IID, IPRIO} as specified by the AIA.
 */
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        /* External interrupt comes from the selected guest interrupt file. */
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                    env->aia_ireg_rmw_fn_arg[PRV_S],
                    AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                  riscv_cpu_mxl_bits(env)),
                    &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        /* No guest file selected: hvictl may inject the external interrupt. */
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        /* VTI set: hvictl supplies the non-external candidate directly. */
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        /* Otherwise take the highest pending VS-level interrupt. */
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Select the candidate with the lowest priority number (= highest). */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            /* Report the real priority, clamped to IPRIO_MMAXIPRIO. */
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            /* IPRIOM clear: priority always reads as 1. */
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}
4524
/*
 * Read stopi (top S-level interrupt).  When V=1 this is the guest's view
 * and redirects to vstopi; otherwise encodes the highest pending S-level
 * interrupt as {IID, IPRIO}.
 */
static RISCVException read_stopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    if (env->virt_enabled) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    irq = riscv_cpu_sirq_pending(env);
    if (irq <= 0 || irq > 63) {
        /* Nothing pending (or out of range): stopi reads as zero. */
        *val = 0;
    } else {
        iprio = env->siprio[irq];
        if (!iprio) {
            /* Zero priority: map default priorities below IPRIO_DEFAULT_S. */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
4551
4552 /* Hypervisor Extensions */
/*
 * Read hstatus, fixing up the fields QEMU implements as constants:
 * VSXL is always 64-bit on RV64+ and VSBE is always little endian.
 */
static RISCVException read_hstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hstatus;
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        /* We only support 64-bit VSXL */
        *val = set_field(*val, HSTATUS_VSXL, 2);
    }
    /* We only support little endian */
    *val = set_field(*val, HSTATUS_VSBE, 0);
    return RISCV_EXCP_NONE;
}
4565
write_hstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4566 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
4567 target_ulong val, uintptr_t ra)
4568 {
4569 uint64_t mask = (target_ulong)-1;
4570 if (!env_archcpu(env)->cfg.ext_svukte) {
4571 mask &= ~HSTATUS_HUKTE;
4572 }
4573 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
4574 if (!env_archcpu(env)->cfg.ext_ssnpm ||
4575 riscv_cpu_mxl(env) != MXL_RV64 ||
4576 get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
4577 mask &= ~HSTATUS_HUPMM;
4578 }
4579 env->hstatus = (env->hstatus & ~mask) | (val & mask);
4580
4581 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
4582 qemu_log_mask(LOG_UNIMP,
4583 "QEMU does not support mixed HSXLEN options.");
4584 }
4585 if (get_field(val, HSTATUS_VSBE) != 0) {
4586 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
4587 }
4588 return RISCV_EXCP_NONE;
4589 }
4590
/* Read the hypervisor exception delegation register (hedeleg). */
static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}
4597
/*
 * Write hedeleg; only exceptions that may be delegated to VS-mode
 * (vs_delegable_excps) are writable.
 */
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}
4604
/*
 * Read hedelegh (RV32 upper half of hedeleg).  Access is gated by
 * mstateen0.P1P13; the register itself currently reads as zero.
 */
static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    RISCVException ret;
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Reserved, now read zero */
    *val = 0;
    return RISCV_EXCP_NONE;
}
4618
/*
 * Write hedelegh (RV32 upper half of hedeleg).  Access is gated by
 * mstateen0.P1P13; the value is currently ignored.
 */
static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    RISCVException ret;
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Reserved, now write ignore */
    return RISCV_EXCP_NONE;
}
4631
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4632 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
4633 uint64_t *ret_val,
4634 uint64_t new_val, uint64_t wr_mask)
4635 {
4636 uint64_t mask = wr_mask & hvien_writable_mask;
4637
4638 if (ret_val) {
4639 *ret_val = env->hvien;
4640 }
4641
4642 env->hvien = (env->hvien & ~mask) | (new_val & mask);
4643
4644 return RISCV_EXCP_NONE;
4645 }
4646
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4647 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
4648 target_ulong *ret_val,
4649 target_ulong new_val, target_ulong wr_mask)
4650 {
4651 uint64_t rval;
4652 RISCVException ret;
4653
4654 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
4655 if (ret_val) {
4656 *ret_val = rval;
4657 }
4658
4659 return ret;
4660 }
4661
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4662 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
4663 target_ulong *ret_val,
4664 target_ulong new_val, target_ulong wr_mask)
4665 {
4666 uint64_t rval;
4667 RISCVException ret;
4668
4669 ret = rmw_hvien64(env, csrno, &rval,
4670 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4671 if (ret_val) {
4672 *ret_val = rval >> 32;
4673 }
4674
4675 return ret;
4676 }
4677
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4678 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
4679 uint64_t *ret_val,
4680 uint64_t new_val, uint64_t wr_mask)
4681 {
4682 uint64_t mask = wr_mask & vs_delegable_ints;
4683
4684 if (ret_val) {
4685 *ret_val = env->hideleg & vs_delegable_ints;
4686 }
4687
4688 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
4689 return RISCV_EXCP_NONE;
4690 }
4691
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4692 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
4693 target_ulong *ret_val,
4694 target_ulong new_val, target_ulong wr_mask)
4695 {
4696 uint64_t rval;
4697 RISCVException ret;
4698
4699 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
4700 if (ret_val) {
4701 *ret_val = rval;
4702 }
4703
4704 return ret;
4705 }
4706
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4707 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
4708 target_ulong *ret_val,
4709 target_ulong new_val, target_ulong wr_mask)
4710 {
4711 uint64_t rval;
4712 RISCVException ret;
4713
4714 ret = rmw_hideleg64(env, csrno, &rval,
4715 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4716 if (ret_val) {
4717 *ret_val = rval >> 32;
4718 }
4719
4720 return ret;
4721 }
4722
4723 /*
4724 * The function is written for two use-cases:
4725 * 1- To access hvip csr as is for HS-mode access.
4726 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
4727 *
4728 * Both report bits 2, 6, 10 and 13:63.
4729 * vsip needs to be read-only zero when both hideleg[i] and
4730 * hvien[i] are zero.
4731 */
static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t old_hvip;
    uint64_t ret_mip;

    /*
     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
     * bits are actually being maintained in mip so we read them from there.
     * This way we have a single source of truth and allows for easier
     * implementation.
     *
     * For bits 13:63 we have:
     *
     * hideleg[i]  hvien[i]
     *   0           0      No delegation. vsip[i] readonly zero.
     *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
     *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
     *
     * alias_mask denotes the bits that come from sip (mip here given we
     * maintain all bits there). nalias_mask denotes bits that come from
     * hvip.
     */
    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
    uint64_t nalias_mask = (~env->hideleg & env->hvien);
    uint64_t wr_mask_hvip;
    uint64_t wr_mask_mip;

    /*
     * Both alias and non-alias mask remain same for vsip except:
     * 1- For VS* bits if they are zero in hideleg.
     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
     */
    if (csrno == CSR_VSIP) {
        /* zero-out VS* bits that are not delegated to VS mode. */
        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);

        /*
         * zero-out 13:63 bits that are zero in both hideleg and hvien.
         * nalias_mask mask can not contain any VS* bits so only second
         * condition applies on it.
         */
        nalias_mask &= (env->hideleg | env->hvien);
        alias_mask &= (env->hideleg | env->hvien);
    }

    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;

    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Snapshot hvip before the write: reads report the pre-write value. */
    old_hvip = env->hvip;

    if (wr_mask_hvip) {
        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);

        /*
         * Given hvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Only take VS* bits from mip. */
        ret_mip &= alias_mask;

        /* Take in non-delegated 13:63 bits from hvip. */
        old_hvip &= nalias_mask;

        *ret_val = ret_mip | old_hvip;
    }

    return ret;
}
4815
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4816 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
4817 target_ulong *ret_val,
4818 target_ulong new_val, target_ulong wr_mask)
4819 {
4820 uint64_t rval;
4821 RISCVException ret;
4822
4823 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
4824 if (ret_val) {
4825 *ret_val = rval;
4826 }
4827
4828 return ret;
4829 }
4830
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4831 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
4832 target_ulong *ret_val,
4833 target_ulong new_val, target_ulong wr_mask)
4834 {
4835 uint64_t rval;
4836 RISCVException ret;
4837
4838 ret = rmw_hvip64(env, csrno, &rval,
4839 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4840 if (ret_val) {
4841 *ret_val = rval >> 32;
4842 }
4843
4844 return ret;
4845 }
4846
/*
 * hip: HS-mode view of mip.  Only hip_writable_mask bits are writable
 * through this CSR, and reads are restricted to HS-level interrupts.
 */
static RISCVException rmw_hip(CPURISCVState *env, int csrno,
                              target_ulong *ret_value,
                              target_ulong new_value, target_ulong write_mask)
{
    int ret = rmw_mip(env, csrno, ret_value, new_value,
                      write_mask & hip_writable_mask);

    if (ret_value) {
        *ret_value &= HS_MODE_INTERRUPTS;
    }
    return ret;
}
4859
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4860 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
4861 target_ulong *ret_val,
4862 target_ulong new_val, target_ulong wr_mask)
4863 {
4864 uint64_t rval;
4865 RISCVException ret;
4866
4867 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
4868 if (ret_val) {
4869 *ret_val = rval & HS_MODE_INTERRUPTS;
4870 }
4871
4872 return ret;
4873 }
4874
/* Read the hypervisor counter-enable register (hcounteren). */
static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->hcounteren;
    return RISCV_EXCP_NONE;
}
4881
write_hcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4882 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
4883 target_ulong val, uintptr_t ra)
4884 {
4885 RISCVCPU *cpu = env_archcpu(env);
4886
4887 /* WARL register - disable unavailable counters */
4888 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4889 COUNTEREN_IR);
4890 return RISCV_EXCP_NONE;
4891 }
4892
/* Read hgeie (guest external interrupt enables); val may be NULL. */
static RISCVException read_hgeie(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeie;
    }
    return RISCV_EXCP_NONE;
}
4901
/*
 * Write hgeie and recompute mip.SGEIP from the new enable/pending
 * intersection.
 */
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
    val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
    env->hgeie = val;
    /* Update mip.SGEIP bit */
    riscv_cpu_update_mip(env, MIP_SGEIP,
                         BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    return RISCV_EXCP_NONE;
}
4913
/* Read the hypervisor trap value register (htval). */
static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}
4920
/* Write the hypervisor trap value register (htval). */
static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}
4927
/* Read the hypervisor trap instruction register (htinst). */
static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}
4934
/* Writes to htinst are ignored; the value is set by trap handling only. */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    return RISCV_EXCP_NONE;
}
4940
/* Read hgeip (guest external interrupt pending); val may be NULL. */
static RISCVException read_hgeip(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeip;
    }
    return RISCV_EXCP_NONE;
}
4949
/* Read the hypervisor guest address translation register (hgatp). */
static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}
4956
/* Write hgatp; the value is legalized by legalize_xatp() before storing. */
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->hgatp = legalize_xatp(env, env->hgatp, val);
    return RISCV_EXCP_NONE;
}
4963
/*
 * Read htimedelta.  The register only exists when the machine provides
 * a time source (rdtime_fn); otherwise accesses are illegal.
 */
static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta;
    return RISCV_EXCP_NONE;
}
4974
/*
 * Write htimedelta (full register on RV64, low 32 bits on RV32) and,
 * with Sstc, re-arm the VS timer since the guest's view of time moved.
 */
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }

    /* NOTE: the rdtime_fn check here is redundant (checked above). */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
4995
/* Read htimedeltah (RV32 upper half of htimedelta). */
static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta >> 32;
    return RISCV_EXCP_NONE;
}
5006
/*
 * Write htimedeltah (RV32 upper half of htimedelta) and, with Sstc,
 * re-arm the VS timer since the guest's view of time moved.
 */
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);

    /* NOTE: the rdtime_fn check here is redundant (checked above). */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
5023
/* Read hvictl (hypervisor virtual interrupt control, AIA). */
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

/* Write hvictl; bits outside HVICTL_VALID_MASK are discarded. */
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}
5037
read_hvipriox(CPURISCVState * env,int first_index,uint8_t * iprio,target_ulong * val)5038 static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
5039 uint8_t *iprio, target_ulong *val)
5040 {
5041 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
5042
5043 /* First index has to be a multiple of number of irqs per register */
5044 if (first_index % num_irqs) {
5045 return (env->virt_enabled) ?
5046 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
5047 }
5048
5049 /* Fill-up return value */
5050 *val = 0;
5051 for (i = 0; i < num_irqs; i++) {
5052 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
5053 continue;
5054 }
5055 if (rdzero) {
5056 continue;
5057 }
5058 *val |= ((target_ulong)iprio[irq]) << (i * 8);
5059 }
5060
5061 return RISCV_EXCP_NONE;
5062 }
5063
write_hvipriox(CPURISCVState * env,int first_index,uint8_t * iprio,target_ulong val)5064 static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
5065 uint8_t *iprio, target_ulong val)
5066 {
5067 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
5068
5069 /* First index has to be a multiple of number of irqs per register */
5070 if (first_index % num_irqs) {
5071 return (env->virt_enabled) ?
5072 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
5073 }
5074
5075 /* Fill-up priority array */
5076 for (i = 0; i < num_irqs; i++) {
5077 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
5078 continue;
5079 }
5080 if (rdzero) {
5081 iprio[irq] = 0;
5082 } else {
5083 iprio[irq] = (val >> (i * 8)) & 0xff;
5084 }
5085 }
5086
5087 return RISCV_EXCP_NONE;
5088 }
5089
/* hviprio1: irq priority slots 0..3 (RV64) or 0..3 low bytes (RV32). */
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

/* hviprio1h: high-half companion of hviprio1 (starts at slot 4). */
static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

/* hviprio2: slots starting at index 8. */
static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

/* hviprio2h: high-half companion of hviprio2 (starts at slot 12). */
static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}
5137
5138 /* Virtual CSR Registers */
/* Read vsstatus (virtual supervisor status). */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

/*
 * Write vsstatus. UXL is preserved when the written value has UXL == 0.
 * When henvcfg.DTE is set, writing SDT=1 also clears SIE; when DTE is
 * clear, SDT is not writable and is masked off.
 */
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    uint64_t mask = (target_ulong)-1;
    if ((val & VSSTATUS64_UXL) == 0) {
        /* Keep the current UXL rather than accepting an all-zero field */
        mask &= ~VSSTATUS64_UXL;
    }
    if ((env->henvcfg & HENVCFG_DTE)) {
        if ((val & SSTATUS_SDT) != 0) {
            val &= ~SSTATUS_SIE;
        }
    } else {
        val &= ~SSTATUS_SDT;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}
5163
/* Read vstvec (virtual supervisor trap vector). */
static RISCVException read_vstvec(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

/*
 * Write vstvec. Writes selecting a reserved mode are dropped, leaving
 * the previous value in place.
 */
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
    if ((val & 3) < 2) {
        env->vstvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}
5182
/* Read/write vsscratch (virtual supervisor scratch). */
static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

/* Read/write vsepc (virtual supervisor exception program counter). */
static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

/* Read/write vscause (virtual supervisor trap cause). */
static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

/* Read/write vstval (virtual supervisor trap value). */
static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

/* Read/write vsatp; writes are legalized like satp/hgatp. */
static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsatp = legalize_xatp(env, env->vsatp, val);
    return RISCV_EXCP_NONE;
}

/* Read/write mtval2 (machine second trap value, H-extension). */
static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

/* Read/write mtinst (machine trap instruction, H-extension). */
static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
5280
/* Physical Memory Protection */

/* mseccfg: machine security configuration; backed by the pmp module. */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

/* pmpcfg0..N: the register index is derived from the CSR number. */
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

/* pmpaddr0..N: address registers, indexed from the CSR number. */
static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}

/* tselect: debug trigger select; backed by the debug module. */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}
5341
/*
 * Read tdata1/tdata2/tdata3 for the currently selected trigger.
 * tdata1 reads as 0 once the trigger index runs past RV_MAX_TRIGGERS,
 * which lets debuggers terminate their trigger enumeration loop.
 */
static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0 in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}

/* Write tdata1/tdata2/tdata3; illegal when the register is unavailable. */
static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}

/* Read tinfo (trigger info for the selected trigger). */
static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}
5376
/* Read mcontext (machine context, Sdtrig debug extension). */
static RISCVException read_mcontext(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mcontext;
    return RISCV_EXCP_NONE;
}
5383
write_mcontext(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)5384 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
5385 target_ulong val, uintptr_t ra)
5386 {
5387 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
5388 int32_t mask;
5389
5390 if (riscv_has_ext(env, RVH)) {
5391 /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
5392 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
5393 } else {
5394 /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
5395 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
5396 }
5397
5398 env->mcontext = val & mask;
5399 return RISCV_EXCP_NONE;
5400 }
5401
/* Read/write mnscratch (resumable-NMI scratch; Smrnmi — TODO confirm). */
static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mnscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->mnscratch = val;
    return RISCV_EXCP_NONE;
}

/* Read/write mnepc (NMI exception program counter). */
static RISCVException read_mnepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mnepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->mnepc = val;
    return RISCV_EXCP_NONE;
}

/* Read/write mncause (NMI cause). */
static RISCVException read_mncause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mncause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mncause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->mncause = val;
    return RISCV_EXCP_NONE;
}

/* Read mnstatus (NMI status; write side has extra masking, see below). */
static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mnstatus;
    return RISCV_EXCP_NONE;
}
5450
/*
 * Write mnstatus. Only NMIE and MNPP are writable (plus MNPV with the
 * H extension). A change of MNPV forces a TLB flush since it affects
 * the virtualization state traps return to.
 */
static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);

    if (riscv_has_ext(env, RVH)) {
        /* Flush tlb on mnstatus fields that affect VM. */
        if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
            tlb_flush(env_cpu(env));
        }

        mask |= MNSTATUS_MNPV;
    }

    /* mnstatus.mnie can only be cleared by hardware. */
    env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
    return RISCV_EXCP_NONE;
}
5469
5470 #endif
5471
5472 /* Crypto Extension */
/*
 * Produce a value for an access to the Zkr "seed" CSR.
 *
 * new_value and write_mask are currently unused: every access returns
 * fresh host entropy. Returns 16 random bits tagged SEED_OPST_ES16 on
 * success, or SEED_OPST_DEAD if the host entropy source failed.
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    /* sizeof instead of a magic "2" keeps the size tied to random_v. */
    random_r = qemu_guest_getrandom(&random_v, sizeof(random_v), &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest.  There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    return rval;
}
5500
rmw_seed(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)5501 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
5502 target_ulong *ret_value,
5503 target_ulong new_value,
5504 target_ulong write_mask)
5505 {
5506 target_ulong rval;
5507
5508 rval = riscv_new_csr_seed(new_value, write_mask);
5509
5510 if (ret_value) {
5511 *ret_value = rval;
5512 }
5513
5514 return RISCV_EXCP_NONE;
5515 }
5516
/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */

/*
 * Common access checks for a CSR access: Zicsr presence, CSR
 * implementation, privileged-spec version, read-only writes, the CSR's
 * own predicate, and (system mode) the privilege-level encoding in the
 * CSR number. The order of the checks is deliberate; see the comment
 * before the predicate call.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* bits [9:8] of the CSR number encode the minimum privilege level */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
5588
/*
 * Perform the actual 64-bit (target_ulong) CSR read-modify-write after
 * access checks have passed. A combined op() handler, when present,
 * takes precedence over the separate read()/write() accessors.
 * ret_value == NULL suppresses the read (rd = x0); write_mask == 0
 * suppresses the write.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask,
                                       uintptr_t ra)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value, ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5637
/* Pure CSR read (csrr): checks access, then reads with no write. */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}

/* CSR read-modify-write (csrrw/csrrs/csrrc): checks, then applies. */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
5660
/*
 * 128-bit CSR read-modify-write. Requires a read128() accessor; falls
 * back to the 64-bit write() (low half only) when no write128() exists,
 * which avoids having to write 128-bit wrappers for every register.
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5700
/*
 * 128-bit CSR read. Uses the 128-bit path when a read128() accessor is
 * defined, otherwise falls back to the 64-bit accessor and
 * zero-extends the result into an Int128.
 */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 int128_zero(), int128_zero(), 0);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}

/*
 * 128-bit CSR read-modify-write with the same fallback behaviour as
 * riscv_csrr_i128(): the 64-bit path only sees the low halves of
 * new_value and write_mask.
 */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 new_value, write_mask, ra);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask), ra);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
5763
/*
 * Debugger support. If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call, which relaxes the
 * privilege check in riscv_csrrw_check().
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    env->debugger = true;
#endif
    /* a zero mask means a pure read; avoid write-side access checks */
    if (!write_mask) {
        ret = riscv_csrr(env, csrno, ret_value);
    } else {
        ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
    }
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
5787
/* Read/write jvt (Zcmt table-jump base vector and control). */
static RISCVException read_jvt(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->jvt;
    return RISCV_EXCP_NONE;
}

static RISCVException write_jvt(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
    env->jvt = val;
    return RISCV_EXCP_NONE;
}
5801
5802 /*
5803 * Control and Status Register function table
5804 * riscv_csr_operations::predicate() must be provided for an implemented CSR
5805 */
5806 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
5807 /* User Floating-Point CSRs */
5808 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
5809 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
5810 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
5811 /* Vector CSRs */
5812 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
5813 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
5814 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
5815 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
5816 [CSR_VL] = { "vl", vs, read_vl },
5817 [CSR_VTYPE] = { "vtype", vs, read_vtype },
5818 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
5819 /* User Timers and Counters */
5820 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5821 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5822 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5823 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5824
5825 /*
5826 * In privileged mode, the monitor will have to emulate TIME CSRs only if
5827 * rdtime callback is not provided by machine/platform emulation.
5828 */
5829 [CSR_TIME] = { "time", ctr, read_time },
5830 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5831
5832 /* Crypto Extension */
5833 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5834
5835 /* Zcmt Extension */
5836 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
5837
5838 /* zicfiss Extension, shadow stack register */
5839 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5840
5841 #if !defined(CONFIG_USER_ONLY)
5842 /* Machine Timers and Counters */
5843 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5844 write_mhpmcounter },
5845 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5846 write_mhpmcounter },
5847 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5848 write_mhpmcounterh },
5849 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5850 write_mhpmcounterh },
5851
5852 /* Machine Information Registers */
5853 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5854 [CSR_MARCHID] = { "marchid", any, read_marchid },
5855 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5856 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5857
5858 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5859 .min_priv_ver = PRIV_VERSION_1_12_0 },
5860 /* Machine Trap Setup */
5861 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5862 NULL, read_mstatus_i128 },
5863 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5864 NULL, read_misa_i128 },
5865 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
5866 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
5867 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5868 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5869 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5870 write_mcounteren },
5871
5872 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5873 write_mstatush },
5874 [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
5875 .min_priv_ver = PRIV_VERSION_1_13_0 },
5876 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5877 .min_priv_ver = PRIV_VERSION_1_13_0 },
5878
5879 /* Machine Trap Handling */
5880 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5881 NULL, read_mscratch_i128, write_mscratch_i128 },
5882 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5883 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5884 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5885 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5886
5887 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5888 [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
5889 rmw_xiselect },
5890 [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
5891 rmw_xireg },
5892
5893 /* Machine Indirect Register Alias */
5894 [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
5895 .min_priv_ver = PRIV_VERSION_1_12_0 },
5896 [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
5897 .min_priv_ver = PRIV_VERSION_1_12_0 },
5898 [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
5899 .min_priv_ver = PRIV_VERSION_1_12_0 },
5900 [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
5901 .min_priv_ver = PRIV_VERSION_1_12_0 },
5902 [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
5903 .min_priv_ver = PRIV_VERSION_1_12_0 },
5904
5905 /* Machine-Level Interrupts (AIA) */
5906 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5907 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5908
5909 /* Virtual Interrupts for Supervisor Level (AIA) */
5910 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5911 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5912
5913 /* Machine-Level High-Half CSRs (AIA) */
5914 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
5915 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5916 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5917 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5918 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5919
5920 /* Execution environment configuration */
5921 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5922 .min_priv_ver = PRIV_VERSION_1_12_0 },
5923 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5924 .min_priv_ver = PRIV_VERSION_1_12_0 },
5925 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5926 .min_priv_ver = PRIV_VERSION_1_12_0 },
5927 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5928 .min_priv_ver = PRIV_VERSION_1_12_0 },
5929 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5930 .min_priv_ver = PRIV_VERSION_1_12_0 },
5931
5932 /* Smstateen extension CSRs */
5933 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5934 .min_priv_ver = PRIV_VERSION_1_12_0 },
5935 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5936 write_mstateen0h,
5937 .min_priv_ver = PRIV_VERSION_1_12_0 },
5938 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5939 write_mstateen_1_3,
5940 .min_priv_ver = PRIV_VERSION_1_12_0 },
5941 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5942 write_mstateenh_1_3,
5943 .min_priv_ver = PRIV_VERSION_1_12_0 },
5944 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5945 write_mstateen_1_3,
5946 .min_priv_ver = PRIV_VERSION_1_12_0 },
5947 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5948 write_mstateenh_1_3,
5949 .min_priv_ver = PRIV_VERSION_1_12_0 },
5950 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5951 write_mstateen_1_3,
5952 .min_priv_ver = PRIV_VERSION_1_12_0 },
5953 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5954 write_mstateenh_1_3,
5955 .min_priv_ver = PRIV_VERSION_1_12_0 },
5956 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5957 .min_priv_ver = PRIV_VERSION_1_12_0 },
5958 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5959 write_hstateen0h,
5960 .min_priv_ver = PRIV_VERSION_1_12_0 },
5961 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5962 write_hstateen_1_3,
5963 .min_priv_ver = PRIV_VERSION_1_12_0 },
5964 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5965 write_hstateenh_1_3,
5966 .min_priv_ver = PRIV_VERSION_1_12_0 },
5967 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5968 write_hstateen_1_3,
5969 .min_priv_ver = PRIV_VERSION_1_12_0 },
5970 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5971 write_hstateenh_1_3,
5972 .min_priv_ver = PRIV_VERSION_1_12_0 },
5973 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5974 write_hstateen_1_3,
5975 .min_priv_ver = PRIV_VERSION_1_12_0 },
5976 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5977 write_hstateenh_1_3,
5978 .min_priv_ver = PRIV_VERSION_1_12_0 },
5979 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5980 .min_priv_ver = PRIV_VERSION_1_12_0 },
5981 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5982 write_sstateen_1_3,
5983 .min_priv_ver = PRIV_VERSION_1_12_0 },
5984 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5985 write_sstateen_1_3,
5986 .min_priv_ver = PRIV_VERSION_1_12_0 },
5987 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5988 write_sstateen_1_3,
5989 .min_priv_ver = PRIV_VERSION_1_12_0 },
5990
5991 /* RNMI */
5992 [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
5993 .min_priv_ver = PRIV_VERSION_1_12_0 },
5994 [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
5995 .min_priv_ver = PRIV_VERSION_1_12_0 },
5996 [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
5997 .min_priv_ver = PRIV_VERSION_1_12_0 },
5998 [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
5999 .min_priv_ver = PRIV_VERSION_1_12_0 },
6000
6001 /* Supervisor Counter Delegation */
6002 [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
6003 read_scountinhibit, write_scountinhibit,
6004 .min_priv_ver = PRIV_VERSION_1_12_0 },
6005
6006 /* Supervisor Trap Setup */
6007 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
6008 NULL, read_sstatus_i128 },
6009 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
6010 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
6011 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
6012 write_scounteren },
6013
6014 /* Supervisor Trap Handling */
6015 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
6016 NULL, read_sscratch_i128, write_sscratch_i128 },
6017 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
6018 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
6019 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
6020 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
6021 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
6022 .min_priv_ver = PRIV_VERSION_1_12_0 },
6023 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
6024 .min_priv_ver = PRIV_VERSION_1_12_0 },
6025 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
6026 write_vstimecmp,
6027 .min_priv_ver = PRIV_VERSION_1_12_0 },
6028 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
6029 write_vstimecmph,
6030 .min_priv_ver = PRIV_VERSION_1_12_0 },
6031
6032 /* Supervisor Protection and Translation */
6033 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
6034
6035 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
6036 [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
6037 rmw_xiselect },
6038 [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
6039 rmw_xireg },
6040
6041 /* Supervisor Indirect Register Alias */
6042 [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
6043 .min_priv_ver = PRIV_VERSION_1_12_0 },
6044 [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
6045 .min_priv_ver = PRIV_VERSION_1_12_0 },
6046 [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
6047 .min_priv_ver = PRIV_VERSION_1_12_0 },
6048 [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
6049 .min_priv_ver = PRIV_VERSION_1_12_0 },
6050 [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
6051 .min_priv_ver = PRIV_VERSION_1_12_0 },
6052
6053 /* Supervisor-Level Interrupts (AIA) */
6054 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
6055 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
6056
6057 /* Supervisor-Level High-Half CSRs (AIA) */
6058 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
6059 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
6060
6061 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
6062 .min_priv_ver = PRIV_VERSION_1_12_0 },
6063 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
6064 .min_priv_ver = PRIV_VERSION_1_12_0 },
6065 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
6066 .min_priv_ver = PRIV_VERSION_1_12_0 },
6067 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
6068 .min_priv_ver = PRIV_VERSION_1_12_0 },
6069 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
6070 .min_priv_ver = PRIV_VERSION_1_12_0 },
6071 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
6072 .min_priv_ver = PRIV_VERSION_1_12_0 },
6073 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
6074 write_hcounteren,
6075 .min_priv_ver = PRIV_VERSION_1_12_0 },
6076 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
6077 .min_priv_ver = PRIV_VERSION_1_12_0 },
6078 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
6079 .min_priv_ver = PRIV_VERSION_1_12_0 },
6080 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
6081 .min_priv_ver = PRIV_VERSION_1_12_0 },
6082 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
6083 .min_priv_ver = PRIV_VERSION_1_12_0 },
6084 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
6085 .min_priv_ver = PRIV_VERSION_1_12_0 },
6086 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
6087 write_htimedelta,
6088 .min_priv_ver = PRIV_VERSION_1_12_0 },
6089 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
6090 write_htimedeltah,
6091 .min_priv_ver = PRIV_VERSION_1_12_0 },
6092
6093 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
6094 write_vsstatus,
6095 .min_priv_ver = PRIV_VERSION_1_12_0 },
6096 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
6097 .min_priv_ver = PRIV_VERSION_1_12_0 },
6098 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie ,
6099 .min_priv_ver = PRIV_VERSION_1_12_0 },
6100 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
6101 .min_priv_ver = PRIV_VERSION_1_12_0 },
6102 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
6103 write_vsscratch,
6104 .min_priv_ver = PRIV_VERSION_1_12_0 },
6105 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
6106 .min_priv_ver = PRIV_VERSION_1_12_0 },
6107 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
6108 .min_priv_ver = PRIV_VERSION_1_12_0 },
6109 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
6110 .min_priv_ver = PRIV_VERSION_1_12_0 },
6111 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
6112 .min_priv_ver = PRIV_VERSION_1_12_0 },
6113
6114 [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
6115 .min_priv_ver = PRIV_VERSION_1_12_0 },
6116 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
6117 .min_priv_ver = PRIV_VERSION_1_12_0 },
6118
6119 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
6120 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
6121 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
6122 write_hvictl },
6123 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
6124 write_hviprio1 },
6125 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
6126 write_hviprio2 },
6127 /*
6128 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
6129 */
6130 [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
6131 rmw_xiselect },
6132 [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
6133 rmw_xireg },
6134
6135 /* Virtual Supervisor Indirect Alias */
6136 [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
6137 .min_priv_ver = PRIV_VERSION_1_12_0 },
6138 [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
6139 .min_priv_ver = PRIV_VERSION_1_12_0 },
6140 [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
6141 .min_priv_ver = PRIV_VERSION_1_12_0 },
6142 [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
6143 .min_priv_ver = PRIV_VERSION_1_12_0 },
6144 [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
6145 .min_priv_ver = PRIV_VERSION_1_12_0 },
6146
6147 /* VS-Level Interrupts (H-extension with AIA) */
6148 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
6149 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
6150
6151 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
6152 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
6153 rmw_hidelegh },
6154 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
6155 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
6156 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
6157 write_hviprio1h },
6158 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
6159 write_hviprio2h },
6160 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
6161 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
6162
6163 /* Physical Memory Protection */
6164 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
6165 .min_priv_ver = PRIV_VERSION_1_11_0 },
6166 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
6167 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
6168 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
6169 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
6170 [CSR_PMPCFG4] = { "pmpcfg4", pmp, read_pmpcfg, write_pmpcfg,
6171 .min_priv_ver = PRIV_VERSION_1_12_0 },
6172 [CSR_PMPCFG5] = { "pmpcfg5", pmp, read_pmpcfg, write_pmpcfg,
6173 .min_priv_ver = PRIV_VERSION_1_12_0 },
6174 [CSR_PMPCFG6] = { "pmpcfg6", pmp, read_pmpcfg, write_pmpcfg,
6175 .min_priv_ver = PRIV_VERSION_1_12_0 },
6176 [CSR_PMPCFG7] = { "pmpcfg7", pmp, read_pmpcfg, write_pmpcfg,
6177 .min_priv_ver = PRIV_VERSION_1_12_0 },
6178 [CSR_PMPCFG8] = { "pmpcfg8", pmp, read_pmpcfg, write_pmpcfg,
6179 .min_priv_ver = PRIV_VERSION_1_12_0 },
6180 [CSR_PMPCFG9] = { "pmpcfg9", pmp, read_pmpcfg, write_pmpcfg,
6181 .min_priv_ver = PRIV_VERSION_1_12_0 },
6182 [CSR_PMPCFG10] = { "pmpcfg10", pmp, read_pmpcfg, write_pmpcfg,
6183 .min_priv_ver = PRIV_VERSION_1_12_0 },
6184 [CSR_PMPCFG11] = { "pmpcfg11", pmp, read_pmpcfg, write_pmpcfg,
6185 .min_priv_ver = PRIV_VERSION_1_12_0 },
6186 [CSR_PMPCFG12] = { "pmpcfg12", pmp, read_pmpcfg, write_pmpcfg,
6187 .min_priv_ver = PRIV_VERSION_1_12_0 },
6188 [CSR_PMPCFG13] = { "pmpcfg13", pmp, read_pmpcfg, write_pmpcfg,
6189 .min_priv_ver = PRIV_VERSION_1_12_0 },
6190 [CSR_PMPCFG14] = { "pmpcfg14", pmp, read_pmpcfg, write_pmpcfg,
6191 .min_priv_ver = PRIV_VERSION_1_12_0 },
6192 [CSR_PMPCFG15] = { "pmpcfg15", pmp, read_pmpcfg, write_pmpcfg,
6193 .min_priv_ver = PRIV_VERSION_1_12_0 },
6194 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
6195 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
6196 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
6197 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
6198 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
6199 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
6200 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
6201 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
6202 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
6203 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
6204 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
6205 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
6206 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
6207 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
6208 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
6209 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
6210 [CSR_PMPADDR16] = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
6211 .min_priv_ver = PRIV_VERSION_1_12_0 },
6212 [CSR_PMPADDR17] = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
6213 .min_priv_ver = PRIV_VERSION_1_12_0 },
6214 [CSR_PMPADDR18] = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
6215 .min_priv_ver = PRIV_VERSION_1_12_0 },
6216 [CSR_PMPADDR19] = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
6217 .min_priv_ver = PRIV_VERSION_1_12_0 },
6218 [CSR_PMPADDR20] = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
6219 .min_priv_ver = PRIV_VERSION_1_12_0 },
6220 [CSR_PMPADDR21] = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
6221 .min_priv_ver = PRIV_VERSION_1_12_0 },
6222 [CSR_PMPADDR22] = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
6223 .min_priv_ver = PRIV_VERSION_1_12_0 },
6224 [CSR_PMPADDR23] = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
6225 .min_priv_ver = PRIV_VERSION_1_12_0 },
6226 [CSR_PMPADDR24] = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
6227 .min_priv_ver = PRIV_VERSION_1_12_0 },
6228 [CSR_PMPADDR25] = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
6229 .min_priv_ver = PRIV_VERSION_1_12_0 },
6230 [CSR_PMPADDR26] = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
6231 .min_priv_ver = PRIV_VERSION_1_12_0 },
6232 [CSR_PMPADDR27] = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
6233 .min_priv_ver = PRIV_VERSION_1_12_0 },
6234 [CSR_PMPADDR28] = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
6235 .min_priv_ver = PRIV_VERSION_1_12_0 },
6236 [CSR_PMPADDR29] = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
6237 .min_priv_ver = PRIV_VERSION_1_12_0 },
6238 [CSR_PMPADDR30] = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
6239 .min_priv_ver = PRIV_VERSION_1_12_0 },
6240 [CSR_PMPADDR31] = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
6241 .min_priv_ver = PRIV_VERSION_1_12_0 },
6242 [CSR_PMPADDR32] = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
6243 .min_priv_ver = PRIV_VERSION_1_12_0 },
6244 [CSR_PMPADDR33] = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
6245 .min_priv_ver = PRIV_VERSION_1_12_0 },
6246 [CSR_PMPADDR34] = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
6247 .min_priv_ver = PRIV_VERSION_1_12_0 },
6248 [CSR_PMPADDR35] = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
6249 .min_priv_ver = PRIV_VERSION_1_12_0 },
6250 [CSR_PMPADDR36] = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
6251 .min_priv_ver = PRIV_VERSION_1_12_0 },
6252 [CSR_PMPADDR37] = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
6253 .min_priv_ver = PRIV_VERSION_1_12_0 },
6254 [CSR_PMPADDR38] = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
6255 .min_priv_ver = PRIV_VERSION_1_12_0 },
6256 [CSR_PMPADDR39] = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
6257 .min_priv_ver = PRIV_VERSION_1_12_0 },
6258 [CSR_PMPADDR40] = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
6259 .min_priv_ver = PRIV_VERSION_1_12_0 },
6260 [CSR_PMPADDR41] = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
6261 .min_priv_ver = PRIV_VERSION_1_12_0 },
6262 [CSR_PMPADDR42] = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
6263 .min_priv_ver = PRIV_VERSION_1_12_0 },
6264 [CSR_PMPADDR43] = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
6265 .min_priv_ver = PRIV_VERSION_1_12_0 },
6266 [CSR_PMPADDR44] = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
6267 .min_priv_ver = PRIV_VERSION_1_12_0 },
6268 [CSR_PMPADDR45] = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
6269 .min_priv_ver = PRIV_VERSION_1_12_0 },
6270 [CSR_PMPADDR46] = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
6271 .min_priv_ver = PRIV_VERSION_1_12_0 },
6272 [CSR_PMPADDR47] = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
6273 .min_priv_ver = PRIV_VERSION_1_12_0 },
6274 [CSR_PMPADDR48] = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
6275 .min_priv_ver = PRIV_VERSION_1_12_0 },
6276 [CSR_PMPADDR49] = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
6277 .min_priv_ver = PRIV_VERSION_1_12_0 },
6278 [CSR_PMPADDR50] = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
6279 .min_priv_ver = PRIV_VERSION_1_12_0 },
6280 [CSR_PMPADDR51] = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
6281 .min_priv_ver = PRIV_VERSION_1_12_0 },
6282 [CSR_PMPADDR52] = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
6283 .min_priv_ver = PRIV_VERSION_1_12_0 },
6284 [CSR_PMPADDR53] = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
6285 .min_priv_ver = PRIV_VERSION_1_12_0 },
6286 [CSR_PMPADDR54] = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
6287 .min_priv_ver = PRIV_VERSION_1_12_0 },
6288 [CSR_PMPADDR55] = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
6289 .min_priv_ver = PRIV_VERSION_1_12_0 },
6290 [CSR_PMPADDR56] = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
6291 .min_priv_ver = PRIV_VERSION_1_12_0 },
6292 [CSR_PMPADDR57] = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
6293 .min_priv_ver = PRIV_VERSION_1_12_0 },
6294 [CSR_PMPADDR58] = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
6295 .min_priv_ver = PRIV_VERSION_1_12_0 },
6296 [CSR_PMPADDR59] = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
6297 .min_priv_ver = PRIV_VERSION_1_12_0 },
6298 [CSR_PMPADDR60] = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
6299 .min_priv_ver = PRIV_VERSION_1_12_0 },
6300 [CSR_PMPADDR61] = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
6301 .min_priv_ver = PRIV_VERSION_1_12_0 },
6302 [CSR_PMPADDR62] = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
6303 .min_priv_ver = PRIV_VERSION_1_12_0 },
6304 [CSR_PMPADDR63] = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
6305 .min_priv_ver = PRIV_VERSION_1_12_0 },
6306
6307 /* Debug CSRs */
6308 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
6309 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
6310 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
6311 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
6312 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
6313 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
6314
6315 [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
6316 [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6317 [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6318 [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
6319 [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
6320
6321 /* Performance Counters */
6322 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
6323 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
6324 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
6325 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
6326 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
6327 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
6328 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
6329 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
6330 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
6331 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
6332 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
6333 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
6334 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
6335 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
6336 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
6337 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
6338 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
6339 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
6340 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
6341 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
6342 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
6343 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
6344 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
6345 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
6346 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
6347 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
6348 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
6349 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
6350 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
6351
6352 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
6353 write_mhpmcounter },
6354 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
6355 write_mhpmcounter },
6356 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
6357 write_mhpmcounter },
6358 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
6359 write_mhpmcounter },
6360 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
6361 write_mhpmcounter },
6362 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
6363 write_mhpmcounter },
6364 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
6365 write_mhpmcounter },
6366 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
6367 write_mhpmcounter },
6368 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
6369 write_mhpmcounter },
6370 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
6371 write_mhpmcounter },
6372 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
6373 write_mhpmcounter },
6374 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
6375 write_mhpmcounter },
6376 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
6377 write_mhpmcounter },
6378 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
6379 write_mhpmcounter },
6380 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
6381 write_mhpmcounter },
6382 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
6383 write_mhpmcounter },
6384 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
6385 write_mhpmcounter },
6386 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
6387 write_mhpmcounter },
6388 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
6389 write_mhpmcounter },
6390 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
6391 write_mhpmcounter },
6392 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
6393 write_mhpmcounter },
6394 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
6395 write_mhpmcounter },
6396 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
6397 write_mhpmcounter },
6398 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
6399 write_mhpmcounter },
6400 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
6401 write_mhpmcounter },
6402 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
6403 write_mhpmcounter },
6404 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
6405 write_mhpmcounter },
6406 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
6407 write_mhpmcounter },
6408 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
6409 write_mhpmcounter },
6410
6411 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
6412 write_mcountinhibit,
6413 .min_priv_ver = PRIV_VERSION_1_11_0 },
6414
6415 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
6416 write_mcyclecfg,
6417 .min_priv_ver = PRIV_VERSION_1_12_0 },
6418 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
6419 write_minstretcfg,
6420 .min_priv_ver = PRIV_VERSION_1_12_0 },
6421
6422 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
6423 write_mhpmevent },
6424 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
6425 write_mhpmevent },
6426 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
6427 write_mhpmevent },
6428 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
6429 write_mhpmevent },
6430 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
6431 write_mhpmevent },
6432 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
6433 write_mhpmevent },
6434 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
6435 write_mhpmevent },
6436 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
6437 write_mhpmevent },
6438 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
6439 write_mhpmevent },
6440 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
6441 write_mhpmevent },
6442 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
6443 write_mhpmevent },
6444 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
6445 write_mhpmevent },
6446 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
6447 write_mhpmevent },
6448 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
6449 write_mhpmevent },
6450 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
6451 write_mhpmevent },
6452 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
6453 write_mhpmevent },
6454 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
6455 write_mhpmevent },
6456 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
6457 write_mhpmevent },
6458 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
6459 write_mhpmevent },
6460 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
6461 write_mhpmevent },
6462 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
6463 write_mhpmevent },
6464 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
6465 write_mhpmevent },
6466 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
6467 write_mhpmevent },
6468 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
6469 write_mhpmevent },
6470 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
6471 write_mhpmevent },
6472 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
6473 write_mhpmevent },
6474 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
6475 write_mhpmevent },
6476 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
6477 write_mhpmevent },
6478 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
6479 write_mhpmevent },
6480
6481 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
6482 write_mcyclecfgh,
6483 .min_priv_ver = PRIV_VERSION_1_12_0 },
6484 [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
6485 write_minstretcfgh,
6486 .min_priv_ver = PRIV_VERSION_1_12_0 },
6487
6488 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
6489 write_mhpmeventh,
6490 .min_priv_ver = PRIV_VERSION_1_12_0 },
6491 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
6492 write_mhpmeventh,
6493 .min_priv_ver = PRIV_VERSION_1_12_0 },
6494 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
6495 write_mhpmeventh,
6496 .min_priv_ver = PRIV_VERSION_1_12_0 },
6497 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
6498 write_mhpmeventh,
6499 .min_priv_ver = PRIV_VERSION_1_12_0 },
6500 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
6501 write_mhpmeventh,
6502 .min_priv_ver = PRIV_VERSION_1_12_0 },
6503 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
6504 write_mhpmeventh,
6505 .min_priv_ver = PRIV_VERSION_1_12_0 },
6506 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
6507 write_mhpmeventh,
6508 .min_priv_ver = PRIV_VERSION_1_12_0 },
6509 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
6510 write_mhpmeventh,
6511 .min_priv_ver = PRIV_VERSION_1_12_0 },
6512 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
6513 write_mhpmeventh,
6514 .min_priv_ver = PRIV_VERSION_1_12_0 },
6515 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
6516 write_mhpmeventh,
6517 .min_priv_ver = PRIV_VERSION_1_12_0 },
6518 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
6519 write_mhpmeventh,
6520 .min_priv_ver = PRIV_VERSION_1_12_0 },
6521 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
6522 write_mhpmeventh,
6523 .min_priv_ver = PRIV_VERSION_1_12_0 },
6524 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
6525 write_mhpmeventh,
6526 .min_priv_ver = PRIV_VERSION_1_12_0 },
6527 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
6528 write_mhpmeventh,
6529 .min_priv_ver = PRIV_VERSION_1_12_0 },
6530 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
6531 write_mhpmeventh,
6532 .min_priv_ver = PRIV_VERSION_1_12_0 },
6533 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
6534 write_mhpmeventh,
6535 .min_priv_ver = PRIV_VERSION_1_12_0 },
6536 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
6537 write_mhpmeventh,
6538 .min_priv_ver = PRIV_VERSION_1_12_0 },
6539 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
6540 write_mhpmeventh,
6541 .min_priv_ver = PRIV_VERSION_1_12_0 },
6542 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
6543 write_mhpmeventh,
6544 .min_priv_ver = PRIV_VERSION_1_12_0 },
6545 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
6546 write_mhpmeventh,
6547 .min_priv_ver = PRIV_VERSION_1_12_0 },
6548 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
6549 write_mhpmeventh,
6550 .min_priv_ver = PRIV_VERSION_1_12_0 },
6551 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
6552 write_mhpmeventh,
6553 .min_priv_ver = PRIV_VERSION_1_12_0 },
6554 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
6555 write_mhpmeventh,
6556 .min_priv_ver = PRIV_VERSION_1_12_0 },
6557 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
6558 write_mhpmeventh,
6559 .min_priv_ver = PRIV_VERSION_1_12_0 },
6560 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
6561 write_mhpmeventh,
6562 .min_priv_ver = PRIV_VERSION_1_12_0 },
6563 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
6564 write_mhpmeventh,
6565 .min_priv_ver = PRIV_VERSION_1_12_0 },
6566 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
6567 write_mhpmeventh,
6568 .min_priv_ver = PRIV_VERSION_1_12_0 },
6569 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
6570 write_mhpmeventh,
6571 .min_priv_ver = PRIV_VERSION_1_12_0 },
6572 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
6573 write_mhpmeventh,
6574 .min_priv_ver = PRIV_VERSION_1_12_0 },
6575
6576 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
6577 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
6578 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
6579 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
6580 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
6581 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
6582 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
6583 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
6584 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
6585 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
6586 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
6587 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
6588 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
6589 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
6590 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
6591 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
6592 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
6593 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
6594 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
6595 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
6596 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
6597 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
6598 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
6599 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
6600 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
6601 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
6602 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
6603 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
6604 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
6605
6606 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
6607 write_mhpmcounterh },
6608 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
6609 write_mhpmcounterh },
6610 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
6611 write_mhpmcounterh },
6612 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
6613 write_mhpmcounterh },
6614 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
6615 write_mhpmcounterh },
6616 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
6617 write_mhpmcounterh },
6618 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
6619 write_mhpmcounterh },
6620 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
6621 write_mhpmcounterh },
6622 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
6623 write_mhpmcounterh },
6624 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
6625 write_mhpmcounterh },
6626 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
6627 write_mhpmcounterh },
6628 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
6629 write_mhpmcounterh },
6630 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
6631 write_mhpmcounterh },
6632 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
6633 write_mhpmcounterh },
6634 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
6635 write_mhpmcounterh },
6636 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
6637 write_mhpmcounterh },
6638 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
6639 write_mhpmcounterh },
6640 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
6641 write_mhpmcounterh },
6642 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
6643 write_mhpmcounterh },
6644 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
6645 write_mhpmcounterh },
6646 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
6647 write_mhpmcounterh },
6648 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
6649 write_mhpmcounterh },
6650 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
6651 write_mhpmcounterh },
6652 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
6653 write_mhpmcounterh },
6654 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
6655 write_mhpmcounterh },
6656 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
6657 write_mhpmcounterh },
6658 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
6659 write_mhpmcounterh },
6660 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
6661 write_mhpmcounterh },
6662 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
6663 write_mhpmcounterh },
6664 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
6665 .min_priv_ver = PRIV_VERSION_1_12_0 },
6666
6667 #endif /* !CONFIG_USER_ONLY */
6668 };
6669