/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "tcg/tcg-cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/icount.h"
#include "accel/tcg/getpc.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "tcg/insn-start-words.h"
#include "internals.h"
#include <stdbool.h>

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}

/* Predicates */
#if !defined(CONFIG_USER_ONLY)
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
#endif

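/*
 * Predicate for the user floating-point CSRs: requires the FPU to be
 * enabled (mstatus.FS) or the Zfinx extension; when the FP unit is not
 * enabled, the Smstateen FCSR bit is also checked. Debugger accesses
 * bypass these checks.
 */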
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException vs(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_zve32x) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

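/*
 * Predicate for the unprivileged counter CSRs (cycle, time, instret,
 * hpmcounter3..31 and their RV32 high halves): the counter must exist
 * (Zicntr or an allocated PMU counter) and the mcounteren/hcounteren/
 * scounteren bits must permit access from the current privilege level.
 */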
static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    RISCVCPU *cpu = env_archcpu(env);
    int ctr_index;
    target_ulong ctr_mask;
    int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;

    if (rv32 && csrno >= CSR_CYCLEH) {
        /* Offset for RV32 hpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    ctr_mask = BIT(ctr_index);

    if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
        (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
        if (!riscv_cpu_cfg(env)->ext_zicntr) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        goto skip_ext_pmu_check;
    }

    if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
        /* No counter is enabled in PMU or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

 skip_ext_pmu_check:

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!get_field(env->hcounteren, ctr_mask) ||
            (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
        !get_field(env->scounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#endif
    return RISCV_EXCP_NONE;
}

static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}

static RISCVException zcmt(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zcmt) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }
#endif

    return RISCV_EXCP_NONE;
}

static RISCVException cfi_ss(CPURISCVState *env, int csrno)
{
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* If ext implemented, M-mode always has access to SSP CSR */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /* If bcfi is not active for the current env, access to the CSR is illegal */
    if (!cpu_get_bcfien(env)) {
#if !defined(CONFIG_USER_ONLY)
        if (env->debugger) {
            return RISCV_EXCP_NONE;
        }
#endif
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

#if !defined(CONFIG_USER_ONLY)
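/*
 * Predicate for the machine-mode mhpmcounter3..31[h] CSRs: the selected
 * counter must be implemented by the PMU.
 */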
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        csrno -= 0x80;
    }

    g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);

    ctr_index = csrno - base_csrno;
    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException mctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return mctr(env, csrno);
}

static RISCVException sscofpmf(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return sscofpmf(env, csrno);
}

static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smcntrpmf(env, csrno);
}

static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}

static RISCVException any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static RISCVException aia_any(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static RISCVException aia_any32(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any32(env, csrno);
}

static RISCVException csrind_any(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smcsrind) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static RISCVException aia_smode(CPURISCVState *env, int csrno)
{
    int ret;

    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (csrno == CSR_STOPEI) {
        ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
    } else {
        ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
    }

    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return smode(env, csrno);
}

static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
    int ret;

    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return smode32(env, csrno);
}

static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    return smode(env, csrno);
}

static bool csrind_extensions_present(CPURISCVState *env)
{
    return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
}

static bool aia_extensions_present(CPURISCVState *env)
{
    return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
}

static bool csrind_or_aia_extensions_present(CPURISCVState *env)
{
    return csrind_extensions_present(env) || aia_extensions_present(env);
}

static RISCVException csrind_smode(CPURISCVState *env, int csrno)
{
    if (!csrind_extensions_present(env)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
{
    if (!csrind_or_aia_extensions_present(env)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static RISCVException hmode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVH)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
{
    if (!csrind_extensions_present(env)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
{
    if (!csrind_or_aia_extensions_present(env)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

static RISCVException umode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVU)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException umode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return umode(env, csrno);
}

static RISCVException mstateen(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

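/* Common predicate for the hstateen*[h] CSRs. */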
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException hstateen(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0);
}

static RISCVException hstateenh(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
}

static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        if (virt) {
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}

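/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs: requires the Sstc
 * extension and a platform rdtime function, and, below M-mode, the
 * counteren TM and menvcfg/henvcfg STCE enables.
 */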
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need for a separate RV32 function, as menvcfg stores both menvcfg
     * and menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException sstc_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return sstc(env, csrno);
}

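/*
 * satp is an S-mode CSR, but accesses additionally trap when mstatus.TVM
 * is set (HS-level S-mode) or hstatus.VTVM is set (VS-mode).
 */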
static RISCVException satp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_S && !env->virt_enabled &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    if (env->priv == PRV_S && env->virt_enabled &&
        get_field(env->hstatus, HSTATUS_VTVM)) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    return smode(env, csrno);
}

static RISCVException hgatp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_S && !env->virt_enabled &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

/*
 * M-mode:
 * Without ext_smctr raise illegal inst excep.
 * Otherwise everything is accessible to m-mode.
 *
 * S-mode:
 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
 * Otherwise everything other than mctrctl is accessible.
 *
 * VS-mode:
 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
 * Without hstateen.ctr raise virtual illegal inst excep.
 * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
 * Always raise illegal instruction exception for sctrdepth.
 */
static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
{
    /* Check if smctr-ext is present */
    if (riscv_cpu_cfg(env)->ext_smctr) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException ctr_smode(CPURISCVState *env, int csrno)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    if (!cfg->ext_smctr && !cfg->ext_ssctr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
    if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
        env->virt_enabled) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    return ret;
}

static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
    int ret;

    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (csrno == CSR_VSTOPEI) {
        ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
    } else {
        ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
    }

    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return hmode(env, csrno);
}

static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
{
    int ret;

    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode32(env, csrno);
}

static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
        return RISCV_EXCP_NONE;
    }

    return hmode(env, csrno);
}

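/*
 * Predicate for the PMP CSRs: requires PMP support and rejects the
 * odd-numbered pmpcfg registers on RV64, where they do not exist.
 */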
static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->pmp) {
        if (csrno <= CSR_PMPCFG3) {
            uint32_t reg_index = csrno - CSR_PMPCFG0;

            /* TODO: RV128 restriction check */
            if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
        }

        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        return RISCV_EXCP_NONE;
    }
    if (riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_NONE;
    }
    if (riscv_cpu_cfg(env)->ext_smmpm) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException debug(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->debug) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException rnmi(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (cpu->cfg.ext_smrnmi) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
#endif

static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     *    an exception (a virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     *    access to seed from U, S or HS modes will raise an illegal instruction
     *    exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}

/* zicfiss CSR_SSP read and write */
static RISCVException read_ssp(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->ssp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_ssp(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
    env->ssp = val;
    return RISCV_EXCP_NONE;
}

/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}

static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
           | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vlenb(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->vlenb;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat & BIT(0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val & BIT(0);
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
    return RISCV_EXCP_NONE;
}

static RISCVException read_vcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException write_vcsr(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}

#if defined(CONFIG_USER_ONLY)
/* User Timers and Counters */
static target_ulong get_ticks(bool shift)
{
    int64_t val = cpu_get_host_ticks();
    target_ulong result = shift ? val >> 32 : val;

    return result;
}

static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}

static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}

#else /* CONFIG_USER_ONLY */

static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mcyclecfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mcyclecfg = val;
    } else {
        /* Set xINH fields if priv mode supported */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
        env->mcyclecfg = val & inh_avail_mask;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcyclecfgh;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                 MCYCLECFGH_BIT_MINH);

    /* Set xINH fields if priv mode supported */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;

    env->mcyclecfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}

static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = env->minstretcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->minstretcfg = val;
    } else {
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
        env->minstretcfg = val & inh_avail_mask;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
                                        target_ulong *val)
{
    *val = env->minstretcfgh;
    return RISCV_EXCP_NONE;
}

static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
                                         target_ulong val, uintptr_t ra)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                 MINSTRETCFGH_BIT_MINH);

    inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;

    env->minstretcfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}

static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mhpmevent_val[evt_index] = val;
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    } else {
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}

static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                 MHPMEVENTH_BIT_MINH);

    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

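/*
 * Return the current value of a counter monitoring the fixed cycle or
 * instret events, honouring the per-privilege-mode inhibit bits of the
 * corresponding config/event CSR. When upper_half is set on RV32, the
 * upper 32 bits are returned.
 */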
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    if (!cfg_val) {
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

 done:
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}

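/*
 * Write a PMU counter: record the new value, snapshot the underlying
 * tick/icount value so later reads return the correct delta, and re-arm
 * the overflow timer for the programmable counters (ctr_idx > 2).
 */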
static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
                                          uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                               ctr_idx, false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
                                           uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
{
    int ctr_idx = csrno - CSR_MCYCLE;

    return riscv_pmu_write_ctr(env, val, ctr_idx);
}

static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
                                         target_ulong val, uintptr_t ra)
{
    int ctr_idx = csrno - CSR_MCYCLEH;

    return riscv_pmu_write_ctrh(env, val, ctr_idx);
}

RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}

static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}

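/*
 * Read-modify-write helpers for the Smcdeleg/Ssccfg counter-delegation
 * path. Only full-width reads (wr_mask == 0) and full-width writes
 * (wr_mask == -1) are supported.
 */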
static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
                              target_ulong *val, target_ulong new_val,
                              target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val, false, ctr_idx);
    } else if (wr_mask) {
        riscv_pmu_write_ctr(env, new_val, ctr_idx);
    } else {
        return -EINVAL;
    }

    return 0;
}

static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
                               target_ulong *val, target_ulong new_val,
                               target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val, true, ctr_idx);
    } else if (wr_mask) {
        riscv_pmu_write_ctrh(env, new_val, ctr_idx);
    } else {
        return -EINVAL;
    }

    return 0;
}

static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
                            target_ulong *val, target_ulong new_val,
                            target_ulong wr_mask)
{
    uint64_t mhpmevt_val = new_val;

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmevent_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            *val &= ~MHPMEVENT_BIT_MINH;
        }
    } else if (wr_mask) {
        wr_mask &= ~MHPMEVENT_BIT_MINH;
        mhpmevt_val = (new_val & wr_mask) |
                      (env->mhpmevent_val[evt_index] & ~wr_mask);
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            mhpmevt_val = mhpmevt_val |
                          ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
        }
        env->mhpmevent_val[evt_index] = mhpmevt_val;
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}

static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
                             target_ulong *val, target_ulong new_val,
                             target_ulong wr_mask)
{
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmeventh_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            *val &= ~MHPMEVENTH_BIT_MINH;
        }
    } else if (wr_mask) {
        wr_mask &= ~MHPMEVENTH_BIT_MINH;
        env->mhpmeventh_val[evt_index] =
            (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
        mhpmevth_val = env->mhpmeventh_val[evt_index];
        mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}

static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
                          target_ulong new_val, target_ulong wr_mask)
{
    switch (cfg_index) {
    case 0: /* CYCLECFG */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFG_BIT_MINH;
            env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
        } else {
            *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH;
        }
        break;
    case 2: /* INSTRETCFG */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFG_BIT_MINH;
            env->minstretcfg = (new_val & wr_mask) |
                               (env->minstretcfg & ~wr_mask);
        } else {
            *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
                           target_ulong new_val, target_ulong wr_mask)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    switch (cfg_index) {
    case 0: /* CYCLECFGH */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFGH_BIT_MINH;
            env->mcyclecfgh = (new_val & wr_mask) |
                              (env->mcyclecfgh & ~wr_mask);
        } else {
            *val = env->mcyclecfgh;
        }
        break;
    case 2: /* INSTRETCFGH */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFGH_BIT_MINH;
            env->minstretcfgh = (new_val & wr_mask) |
                                (env->minstretcfgh & ~wr_mask);
        } else {
            *val = env->minstretcfgh;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

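/*
 * scountovf reports the overflow (OF) bits of the mhpmevent CSRs for the
 * counters that S-mode is allowed to access via mcounteren.
 */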
read_scountovf(CPURISCVState * env,int csrno,target_ulong * val)1592 static RISCVException read_scountovf(CPURISCVState *env, int csrno,
1593 target_ulong *val)
1594 {
1595 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
1596 int i;
1597 *val = 0;
1598 target_ulong *mhpm_evt_val;
1599 uint64_t of_bit_mask;
1600
1601 /* Virtualize scountovf for counter delegation */
1602 if (riscv_cpu_cfg(env)->ext_sscofpmf &&
1603 riscv_cpu_cfg(env)->ext_ssccfg &&
1604 get_field(env->menvcfg, MENVCFG_CDE) &&
1605 env->virt_enabled) {
1606 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1607 }
1608
1609 if (riscv_cpu_mxl(env) == MXL_RV32) {
1610 mhpm_evt_val = env->mhpmeventh_val;
1611 of_bit_mask = MHPMEVENTH_BIT_OF;
1612 } else {
1613 mhpm_evt_val = env->mhpmevent_val;
1614 of_bit_mask = MHPMEVENT_BIT_OF;
1615 }
1616
1617 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
1618 if ((get_field(env->mcounteren, BIT(i))) &&
1619 (mhpm_evt_val[i] & of_bit_mask)) {
1620 *val |= BIT(i);
1621 }
1622 }
1623
1624 return RISCV_EXCP_NONE;
1625 }
1626
read_time(CPURISCVState * env,int csrno,target_ulong * val)1627 static RISCVException read_time(CPURISCVState *env, int csrno,
1628 target_ulong *val)
1629 {
1630 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1631
1632 if (!env->rdtime_fn) {
1633 return RISCV_EXCP_ILLEGAL_INST;
1634 }
1635
1636 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1637 return RISCV_EXCP_NONE;
1638 }
1639
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1640 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1641 target_ulong *val)
1642 {
1643 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1644
1645 if (!env->rdtime_fn) {
1646 return RISCV_EXCP_ILLEGAL_INST;
1647 }
1648
1649 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1650 return RISCV_EXCP_NONE;
1651 }
1652
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1653 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1654 target_ulong *val)
1655 {
1656 *val = env->vstimecmp;
1657
1658 return RISCV_EXCP_NONE;
1659 }
1660
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1661 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1662 target_ulong *val)
1663 {
1664 *val = env->vstimecmp >> 32;
1665
1666 return RISCV_EXCP_NONE;
1667 }
1668
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1669 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1670 target_ulong val, uintptr_t ra)
1671 {
1672 if (riscv_cpu_mxl(env) == MXL_RV32) {
1673 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1674 } else {
1675 env->vstimecmp = val;
1676 }
1677
1678 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1679 env->htimedelta, MIP_VSTIP);
1680
1681 return RISCV_EXCP_NONE;
1682 }
1683
write_vstimecmph(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1684 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1685 target_ulong val, uintptr_t ra)
1686 {
1687 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1688 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1689 env->htimedelta, MIP_VSTIP);
1690
1691 return RISCV_EXCP_NONE;
1692 }
1693
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1694 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1695 target_ulong *val)
1696 {
1697 if (env->virt_enabled) {
1698 *val = env->vstimecmp;
1699 } else {
1700 *val = env->stimecmp;
1701 }
1702
1703 return RISCV_EXCP_NONE;
1704 }
1705
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1706 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1707 target_ulong *val)
1708 {
1709 if (env->virt_enabled) {
1710 *val = env->vstimecmp >> 32;
1711 } else {
1712 *val = env->stimecmp >> 32;
1713 }
1714
1715 return RISCV_EXCP_NONE;
1716 }
1717
write_stimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1718 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1719 target_ulong val, uintptr_t ra)
1720 {
1721 if (env->virt_enabled) {
1722 if (env->hvictl & HVICTL_VTI) {
1723 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1724 }
1725 return write_vstimecmp(env, csrno, val, ra);
1726 }
1727
1728 if (riscv_cpu_mxl(env) == MXL_RV32) {
1729 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1730 } else {
1731 env->stimecmp = val;
1732 }
1733
1734 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1735
1736 return RISCV_EXCP_NONE;
1737 }
1738
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1739 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1740 target_ulong val, uintptr_t ra)
1741 {
1742 if (env->virt_enabled) {
1743 if (env->hvictl & HVICTL_VTI) {
1744 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1745 }
1746 return write_vstimecmph(env, csrno, val, ra);
1747 }
1748
1749 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1750 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1751
1752 return RISCV_EXCP_NONE;
1753 }
1754
1755 #define VSTOPI_NUM_SRCS 5
1756
1757 /*
1758 * All core local interrupts except the fixed ones 0:12. This macro is for
1759 * virtual interrupts logic so please don't change this to avoid messing up
1760 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
1761 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
1762 * VS level`.
1763 */
1764 #define LOCAL_INTERRUPTS (~0x1FFFULL)
1765
1766 static const uint64_t delegable_ints =
1767 S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
1768 static const uint64_t vs_delegable_ints =
1769 (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
1770 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1771 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
1772 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1773 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1774 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1775 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1776 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1777 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1778 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1779 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1780 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1781 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1782 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1783 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1784 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1785 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1786 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1787 (1ULL << (RISCV_EXCP_SW_CHECK)) | \
1788 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1789 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1790 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1791 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1792 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1793 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1794 (1ULL << (RISCV_EXCP_VS_ECALL)) |
1795 (1ULL << (RISCV_EXCP_M_ECALL)) |
1796 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1797 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1798 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1799 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1800 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1801 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1802 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1803
1804 /*
1805 * Spec allows for bits 13:63 to be either read-only or writable.
1806 * So far we have interrupt LCOFIP in that region which is writable.
1807 *
1808 * Also, spec allows to inject virtual interrupts in this region even
1809 * without any hardware interrupts for that interrupt number.
1810 *
1811 * For now interrupt in 13:63 region are all kept writable. 13 being
1812 * LCOFIP and 14:63 being virtual only. Change this in future if we
1813 * introduce more interrupts that are not writable.
1814 */
1815
1816 /* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
1817 static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
1818 LOCAL_INTERRUPTS;
1819 static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
1820 LOCAL_INTERRUPTS;
1821
1822 static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
1823 static const uint64_t hip_writable_mask = MIP_VSSIP;
1824 static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
1825 MIP_VSEIP | LOCAL_INTERRUPTS;
1826 static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
1827
1828 static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
1829
1830 const bool valid_vm_1_10_32[16] = {
1831 [VM_1_10_MBARE] = true,
1832 [VM_1_10_SV32] = true
1833 };
1834
1835 const bool valid_vm_1_10_64[16] = {
1836 [VM_1_10_MBARE] = true,
1837 [VM_1_10_SV39] = true,
1838 [VM_1_10_SV48] = true,
1839 [VM_1_10_SV57] = true
1840 };
1841
1842 /* Machine Information Registers */
1843 static RISCVException read_zero(CPURISCVState *env, int csrno,
1844 target_ulong *val)
1845 {
1846 *val = 0;
1847 return RISCV_EXCP_NONE;
1848 }
1849
1850 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1851 target_ulong val, uintptr_t ra)
1852 {
1853 return RISCV_EXCP_NONE;
1854 }
1855
1856 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1857 target_ulong *val)
1858 {
1859 *val = riscv_cpu_cfg(env)->mvendorid;
1860 return RISCV_EXCP_NONE;
1861 }
1862
1863 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1864 target_ulong *val)
1865 {
1866 *val = riscv_cpu_cfg(env)->marchid;
1867 return RISCV_EXCP_NONE;
1868 }
1869
1870 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1871 target_ulong *val)
1872 {
1873 *val = riscv_cpu_cfg(env)->mimpid;
1874 return RISCV_EXCP_NONE;
1875 }
1876
1877 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1878 target_ulong *val)
1879 {
1880 *val = env->mhartid;
1881 return RISCV_EXCP_NONE;
1882 }
1883
1884 /* Machine Trap Setup */
1885
1886 /* We do not store SD explicitly, only compute it on demand. */
1887 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1888 {
1889 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1890 (status & MSTATUS_VS) == MSTATUS_VS ||
1891 (status & MSTATUS_XS) == MSTATUS_XS) {
1892 switch (xl) {
1893 case MXL_RV32:
1894 return status | MSTATUS32_SD;
1895 case MXL_RV64:
1896 return status | MSTATUS64_SD;
1897 case MXL_RV128:
1898 return MSTATUSH128_SD;
1899 default:
1900 g_assert_not_reached();
1901 }
1902 }
1903 return status;
1904 }
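/*
 * Worked example (illustrative values): SD is reported whenever any of
 * FS/VS/XS reads back as Dirty (0b11). On RV64 with mstatus.FS == Dirty:
 *
 *   uint64_t s = add_status_sd(MXL_RV64, MSTATUS_FS);
 *   // s == (MSTATUS_FS | MSTATUS64_SD): bit 63 reads as set even though
 *   // SD is never stored in env->mstatus.
 */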
1905
1906 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1907 target_ulong *val)
1908 {
1909 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1910 return RISCV_EXCP_NONE;
1911 }
1912
1913 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1914 {
1915 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
1916 RISCVCPU *cpu = env_archcpu(env);
1917 int satp_mode_supported_max = cpu->cfg.max_satp_mode;
1918 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
1919
1920 assert(satp_mode_supported_max >= 0);
1921 return vm <= satp_mode_supported_max && valid_vm[vm];
1922 }
1923
1924 static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
1925 target_ulong val)
1926 {
1927 target_ulong mask;
1928 bool vm;
1929 if (riscv_cpu_mxl(env) == MXL_RV32) {
1930 vm = validate_vm(env, get_field(val, SATP32_MODE));
1931 mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
1932 } else {
1933 vm = validate_vm(env, get_field(val, SATP64_MODE));
1934 mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
1935 }
1936
1937 if (vm && mask) {
1938 /*
1939 * The ISA defines SATP.MODE=Bare as "no translation", but we still
1940 * pass these through QEMU's TLB emulation as it improves
1941 * performance. Flushing the TLB on SATP writes with paging
1942 * enabled avoids leaking those invalid cached mappings.
1943 */
1944 tlb_flush(env_cpu(env));
1945 return val;
1946 }
1947 return old_xatp;
1948 }
1949
1950 static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
1951 target_ulong val)
1952 {
1953 bool valid = false;
1954 target_ulong new_mpp = get_field(val, MSTATUS_MPP);
1955
1956 switch (new_mpp) {
1957 case PRV_M:
1958 valid = true;
1959 break;
1960 case PRV_S:
1961 valid = riscv_has_ext(env, RVS);
1962 break;
1963 case PRV_U:
1964 valid = riscv_has_ext(env, RVU);
1965 break;
1966 }
1967
1968 /* Leave the field unchanged if the new_mpp value is invalid */
1969 if (!valid) {
1970 val = set_field(val, MSTATUS_MPP, old_mpp);
1971 }
1972
1973 return val;
1974 }
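/*
 * Worked example (illustrative, hypothetical configuration): MPP is WARL,
 * so selecting an unimplemented privilege mode keeps the old value. On a
 * hart without the S extension:
 *
 *   val = legalize_mpp(env, PRV_M, set_field(val, MSTATUS_MPP, PRV_S));
 *   // get_field(val, MSTATUS_MPP) == PRV_M
 */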
1975
1976 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1977 target_ulong val, uintptr_t ra)
1978 {
1979 uint64_t mstatus = env->mstatus;
1980 uint64_t mask = 0;
1981 RISCVMXL xl = riscv_cpu_mxl(env);
1982
1983 /*
1984 * The MPP field has been made WARL since priv version 1.11. However,
1985 * legalization for it will not break any software running on 1.10.
1986 */
1987 val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);
1988
1989 /* flush tlb on mstatus fields that affect VM */
1990 if ((val ^ mstatus) & MSTATUS_MXR) {
1991 tlb_flush(env_cpu(env));
1992 }
1993 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1994 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1995 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1996 MSTATUS_TW;
1997
1998 if (riscv_has_ext(env, RVF)) {
1999 mask |= MSTATUS_FS;
2000 }
2001 if (riscv_has_ext(env, RVV)) {
2002 mask |= MSTATUS_VS;
2003 }
2004
2005 if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
2006 mask |= MSTATUS_SDT;
2007 if ((val & MSTATUS_SDT) != 0) {
2008 val &= ~MSTATUS_SIE;
2009 }
2010 }
2011
2012 if (riscv_cpu_cfg(env)->ext_smdbltrp) {
2013 mask |= MSTATUS_MDT;
2014 if ((val & MSTATUS_MDT) != 0) {
2015 val &= ~MSTATUS_MIE;
2016 }
2017 }
2018
2019 if (xl != MXL_RV32 || env->debugger) {
2020 if (riscv_has_ext(env, RVH)) {
2021 mask |= MSTATUS_MPV | MSTATUS_GVA;
2022 }
2023 if ((val & MSTATUS64_UXL) != 0) {
2024 mask |= MSTATUS64_UXL;
2025 }
2026 }
2027
2028 /* If cfi lp extension is available, then apply cfi lp mask */
2029 if (env_archcpu(env)->cfg.ext_zicfilp) {
2030 mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
2031 }
2032
2033 mstatus = (mstatus & ~mask) | (val & mask);
2034
2035 env->mstatus = mstatus;
2036
2037 /*
2038 * Except in debug mode, UXL/SXL can only be modified by higher
2039 * privilege mode. So xl will not be changed in normal mode.
2040 */
2041 if (env->debugger) {
2042 env->xl = cpu_recompute_xl(env);
2043 }
2044
2045 return RISCV_EXCP_NONE;
2046 }
2047
2048 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
2049 target_ulong *val)
2050 {
2051 *val = env->mstatus >> 32;
2052 return RISCV_EXCP_NONE;
2053 }
2054
2055 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
2056 target_ulong val, uintptr_t ra)
2057 {
2058 uint64_t valh = (uint64_t)val << 32;
2059 uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
2060
2061 if (riscv_cpu_cfg(env)->ext_smdbltrp) {
2062 mask |= MSTATUS_MDT;
2063 if ((valh & MSTATUS_MDT) != 0) {
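/*
 * MIE lives in the low 32 bits, which are all zero in valh, so
 * adding it to the mask clears MIE when MDT is being set here
 * (mirroring the MDT handling in write_mstatus() above).
 */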
2064 mask |= MSTATUS_MIE;
2065 }
2066 }
2067 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
2068
2069 return RISCV_EXCP_NONE;
2070 }
2071
2072 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
2073 Int128 *val)
2074 {
2075 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
2076 env->mstatus));
2077 return RISCV_EXCP_NONE;
2078 }
2079
2080 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
2081 Int128 *val)
2082 {
2083 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
2084 return RISCV_EXCP_NONE;
2085 }
2086
2087 static RISCVException read_misa(CPURISCVState *env, int csrno,
2088 target_ulong *val)
2089 {
2090 target_ulong misa;
2091
2092 switch (env->misa_mxl) {
2093 case MXL_RV32:
2094 misa = (target_ulong)MXL_RV32 << 30;
2095 break;
2096 #ifdef TARGET_RISCV64
2097 case MXL_RV64:
2098 misa = (target_ulong)MXL_RV64 << 62;
2099 break;
2100 #endif
2101 default:
2102 g_assert_not_reached();
2103 }
2104
2105 *val = misa | env->misa_ext;
2106 return RISCV_EXCP_NONE;
2107 }
2108
2109 static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
2110 {
2111 uint64_t data[INSN_START_WORDS];
2112
2113 /* Outside of a running cpu, env contains the next pc. */
2114 if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
2115 return env->pc;
2116 }
2117
2118 /* Within unwind data, [0] is pc and [1] is the opcode. */
2119 return data[0] + insn_len(data[1]);
2120 }
2121
2122 static RISCVException write_misa(CPURISCVState *env, int csrno,
2123 target_ulong val, uintptr_t ra)
2124 {
2125 RISCVCPU *cpu = env_archcpu(env);
2126 uint32_t orig_misa_ext = env->misa_ext;
2127 Error *local_err = NULL;
2128
2129 if (!riscv_cpu_cfg(env)->misa_w) {
2130 /* drop write to misa */
2131 return RISCV_EXCP_NONE;
2132 }
2133
2134 /* Mask extensions that are not supported by this hart */
2135 val &= env->misa_ext_mask;
2136
2137 /* Suppress 'C' if next instruction is not aligned. */
2138 if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
2139 val &= ~RVC;
2140 }
2141
2142 /* Disable RVG if any of its dependencies are disabled */
2143 if (!(val & RVI && val & RVM && val & RVA &&
2144 val & RVF && val & RVD)) {
2145 val &= ~RVG;
2146 }
2147
2148 /* If nothing changed, do nothing. */
2149 if (val == env->misa_ext) {
2150 return RISCV_EXCP_NONE;
2151 }
2152
2153 env->misa_ext = val;
2154 riscv_cpu_validate_set_extensions(cpu, &local_err);
2155 if (local_err != NULL) {
2156 /* Rollback on validation error */
2157 qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
2158 "0x%x, keeping existing MISA ext 0x%x\n",
2159 env->misa_ext, orig_misa_ext);
2160
2161 env->misa_ext = orig_misa_ext;
2162
2163 return RISCV_EXCP_NONE;
2164 }
2165
2166 if (!(env->misa_ext & RVF)) {
2167 env->mstatus &= ~MSTATUS_FS;
2168 }
2169
2170 /* flush translation cache */
2171 tb_flush(env_cpu(env));
2172 env->xl = riscv_cpu_mxl(env);
2173 return RISCV_EXCP_NONE;
2174 }
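/*
 * Worked example (illustrative): since RVG is only kept when I, M, A, F
 * and D all remain enabled, a write that clears D also drops G, e.g.
 * writing (env->misa_ext & ~RVD) leaves RVG cleared, while mstatus.FS is
 * untouched because F is still present.
 */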
2175
2176 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
2177 target_ulong *val)
2178 {
2179 *val = env->medeleg;
2180 return RISCV_EXCP_NONE;
2181 }
2182
2183 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
2184 target_ulong val, uintptr_t ra)
2185 {
2186 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
2187 return RISCV_EXCP_NONE;
2188 }
2189
2190 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
2191 uint64_t *ret_val,
2192 uint64_t new_val, uint64_t wr_mask)
2193 {
2194 uint64_t mask = wr_mask & delegable_ints;
2195
2196 if (ret_val) {
2197 *ret_val = env->mideleg;
2198 }
2199
2200 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
2201
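/*
 * With the hypervisor extension, the VS-level interrupt bits (and SGEIP)
 * in mideleg are treated as read-only one, so force them on here.
 */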
2202 if (riscv_has_ext(env, RVH)) {
2203 env->mideleg |= HS_MODE_INTERRUPTS;
2204 }
2205
2206 return RISCV_EXCP_NONE;
2207 }
2208
2209 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
2210 target_ulong *ret_val,
2211 target_ulong new_val, target_ulong wr_mask)
2212 {
2213 uint64_t rval;
2214 RISCVException ret;
2215
2216 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
2217 if (ret_val) {
2218 *ret_val = rval;
2219 }
2220
2221 return ret;
2222 }
2223
2224 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
2225 target_ulong *ret_val,
2226 target_ulong new_val,
2227 target_ulong wr_mask)
2228 {
2229 uint64_t rval;
2230 RISCVException ret;
2231
2232 ret = rmw_mideleg64(env, csrno, &rval,
2233 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2234 if (ret_val) {
2235 *ret_val = rval >> 32;
2236 }
2237
2238 return ret;
2239 }
2240
2241 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
2242 uint64_t *ret_val,
2243 uint64_t new_val, uint64_t wr_mask)
2244 {
2245 uint64_t mask = wr_mask & all_ints;
2246
2247 if (ret_val) {
2248 *ret_val = env->mie;
2249 }
2250
2251 env->mie = (env->mie & ~mask) | (new_val & mask);
2252
2253 if (!riscv_has_ext(env, RVH)) {
2254 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
2255 }
2256
2257 return RISCV_EXCP_NONE;
2258 }
2259
2260 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
2261 target_ulong *ret_val,
2262 target_ulong new_val, target_ulong wr_mask)
2263 {
2264 uint64_t rval;
2265 RISCVException ret;
2266
2267 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
2268 if (ret_val) {
2269 *ret_val = rval;
2270 }
2271
2272 return ret;
2273 }
2274
2275 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
2276 target_ulong *ret_val,
2277 target_ulong new_val, target_ulong wr_mask)
2278 {
2279 uint64_t rval;
2280 RISCVException ret;
2281
2282 ret = rmw_mie64(env, csrno, &rval,
2283 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2284 if (ret_val) {
2285 *ret_val = rval >> 32;
2286 }
2287
2288 return ret;
2289 }
2290
2291 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
2292 uint64_t *ret_val,
2293 uint64_t new_val, uint64_t wr_mask)
2294 {
2295 uint64_t mask = wr_mask & mvien_writable_mask;
2296
2297 if (ret_val) {
2298 *ret_val = env->mvien;
2299 }
2300
2301 env->mvien = (env->mvien & ~mask) | (new_val & mask);
2302
2303 return RISCV_EXCP_NONE;
2304 }
2305
2306 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
2307 target_ulong *ret_val,
2308 target_ulong new_val, target_ulong wr_mask)
2309 {
2310 uint64_t rval;
2311 RISCVException ret;
2312
2313 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
2314 if (ret_val) {
2315 *ret_val = rval;
2316 }
2317
2318 return ret;
2319 }
2320
2321 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
2322 target_ulong *ret_val,
2323 target_ulong new_val, target_ulong wr_mask)
2324 {
2325 uint64_t rval;
2326 RISCVException ret;
2327
2328 ret = rmw_mvien64(env, csrno, &rval,
2329 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2330 if (ret_val) {
2331 *ret_val = rval >> 32;
2332 }
2333
2334 return ret;
2335 }
2336
2337 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
2338 target_ulong *val)
2339 {
2340 int irq;
2341 uint8_t iprio;
2342
2343 irq = riscv_cpu_mirq_pending(env);
2344 if (irq <= 0 || irq > 63) {
2345 *val = 0;
2346 } else {
2347 iprio = env->miprio[irq];
2348 if (!iprio) {
2349 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
2350 iprio = IPRIO_MMAXIPRIO;
2351 }
2352 }
2353 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2354 *val |= iprio;
2355 }
2356
2357 return RISCV_EXCP_NONE;
2358 }
2359
2360 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
2361 {
2362 if (!env->virt_enabled) {
2363 return csrno;
2364 }
2365
2366 switch (csrno) {
2367 case CSR_SISELECT:
2368 return CSR_VSISELECT;
2369 case CSR_SIREG:
2370 return CSR_VSIREG;
2371 case CSR_STOPEI:
2372 return CSR_VSTOPEI;
2373 default:
2374 return csrno;
2375 };
2376 }
2377
2378 static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
2379 {
2380 if (!env->virt_enabled) {
2381 return csrno;
2382 }
2383
2384 switch (csrno) {
2385 case CSR_SISELECT:
2386 return CSR_VSISELECT;
2387 case CSR_SIREG:
2388 case CSR_SIREG2:
2389 case CSR_SIREG3:
2390 case CSR_SIREG4:
2391 case CSR_SIREG5:
2392 case CSR_SIREG6:
2393 return CSR_VSIREG + (csrno - CSR_SIREG);
2394 default:
2395 return csrno;
2396 };
2397 }
2398
2399 static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
2400 target_ulong *val, target_ulong new_val,
2401 target_ulong wr_mask)
2402 {
2403 target_ulong *iselect;
2404 int ret;
2405
2406 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
2407 if (ret != RISCV_EXCP_NONE) {
2408 return ret;
2409 }
2410
2411 /* Translate CSR number for VS-mode */
2412 csrno = csrind_xlate_vs_csrno(env, csrno);
2413
2414 /* Find the iselect CSR based on CSR number */
2415 switch (csrno) {
2416 case CSR_MISELECT:
2417 iselect = &env->miselect;
2418 break;
2419 case CSR_SISELECT:
2420 iselect = &env->siselect;
2421 break;
2422 case CSR_VSISELECT:
2423 iselect = &env->vsiselect;
2424 break;
2425 default:
2426 return RISCV_EXCP_ILLEGAL_INST;
2427 };
2428
2429 if (val) {
2430 *val = *iselect;
2431 }
2432
2433 if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
2434 wr_mask &= ISELECT_MASK_SXCSRIND;
2435 } else {
2436 wr_mask &= ISELECT_MASK_AIA;
2437 }
2438
2439 if (wr_mask) {
2440 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
2441 }
2442
2443 return RISCV_EXCP_NONE;
2444 }
2445
2446 static bool xiselect_aia_range(target_ulong isel)
2447 {
2448 return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
2449 (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
2450 }
2451
2452 static bool xiselect_cd_range(target_ulong isel)
2453 {
2454 return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
2455 }
2456
2457 static bool xiselect_ctr_range(int csrno, target_ulong isel)
2458 {
2459 /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
2460 return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
2461 csrno < CSR_MIREG;
2462 }
2463
2464 static int rmw_iprio(target_ulong xlen,
2465 target_ulong iselect, uint8_t *iprio,
2466 target_ulong *val, target_ulong new_val,
2467 target_ulong wr_mask, int ext_irq_no)
2468 {
2469 int i, firq, nirqs;
2470 target_ulong old_val;
2471
2472 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
2473 return -EINVAL;
2474 }
2475 if (xlen != 32 && iselect & 0x1) {
2476 return -EINVAL;
2477 }
2478
2479 nirqs = 4 * (xlen / 32);
2480 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
2481
2482 old_val = 0;
2483 for (i = 0; i < nirqs; i++) {
2484 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
2485 }
2486
2487 if (val) {
2488 *val = old_val;
2489 }
2490
2491 if (wr_mask) {
2492 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
2493 for (i = 0; i < nirqs; i++) {
2494 /*
2495 * M-level and S-level external IRQ priorities are always read-only
2496 * zero. This means the default priority order is always preferred
2497 * for M-level and S-level external IRQs.
2498 */
2499 if ((firq + i) == ext_irq_no) {
2500 continue;
2501 }
2502 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
2503 }
2504 }
2505
2506 return 0;
2507 }
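/*
 * Worked example (illustrative): each iprio[] entry is an 8-bit priority
 * and one iselect register packs xlen/8 of them. On RV64, nirqs == 8, so
 * ISELECT_IPRIO0 covers interrupts 0..7 and ISELECT_IPRIO2 covers 8..15
 * (odd iprio registers are rejected above when xlen != 32). A read then
 * assembles:
 *
 *   old_val = iprio[firq] | iprio[firq + 1] << 8 | ... | iprio[firq + 7] << 56;
 */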
2508
2509 static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
2510 target_ulong new_val, target_ulong wr_mask)
2511 {
2512 /*
2513 * CTR arrays are treated as circular buffers and TOS always points to next
2514 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
2515 * 0 is always the latest one, traversal is a bit different here. See the
2516 * below example.
2517 *
2518 * Depth = 16.
2519 *
2520 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
2521 * TOS H
2522 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
2523 */
2524 const uint64_t entry = isel - CTR_ENTRIES_FIRST;
2525 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
2526 uint64_t idx;
2527
2528 /* Entry greater than depth-1 is read-only zero */
2529 if (entry >= depth) {
2530 if (val) {
2531 *val = 0;
2532 }
2533 return 0;
2534 }
2535
2536 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
2537 idx = (idx - entry - 1) & (depth - 1);
2538
2539 if (val) {
2540 *val = env->ctr_src[idx];
2541 }
2542
2543 env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
2544
2545 return 0;
2546 }
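/*
 * Worked example (illustrative): with depth == 16 and
 * sctrstatus.WRPTR == 7, as in the diagram above:
 *
 *   idx = (7 - 0 - 1) & (16 - 1);   // == 6, logical entry 0 (latest)
 *   idx = (7 - 9 - 1) & (16 - 1);   // == 13, logical entry 9 wraps around
 */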
2547
2548 static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
2549 target_ulong new_val, target_ulong wr_mask)
2550 {
2551 /*
2552 * CTR arrays are treated as circular buffers and TOS always points to next
2553 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
2554 * 0 is always the latest one, traversal is a bit different here. See the
2555 * below example.
2556 *
2557 * Depth = 16.
2558 *
2559 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
2560 * head H
2561 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
2562 */
2563 const uint64_t entry = isel - CTR_ENTRIES_FIRST;
2564 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
2565 uint64_t idx;
2566
2567 /* Entry greater than depth-1 is read-only zero */
2568 if (entry >= depth) {
2569 if (val) {
2570 *val = 0;
2571 }
2572 return 0;
2573 }
2574
2575 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
2576 idx = (idx - entry - 1) & (depth - 1);
2577
2578 if (val) {
2579 *val = env->ctr_dst[idx];
2580 }
2581
2582 env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
2583
2584 return 0;
2585 }
2586
2587 static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
2588 target_ulong new_val, target_ulong wr_mask)
2589 {
2590 /*
2591 * CTR arrays are treated as circular buffers and TOS always points to next
2592 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
2593 * 0 is always the latest one, traversal is a bit different here. See the
2594 * below example.
2595 *
2596 * Depth = 16.
2597 *
2598 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
2599 * head H
2600 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
2601 */
2602 const uint64_t entry = isel - CTR_ENTRIES_FIRST;
2603 const uint64_t mask = wr_mask & CTRDATA_MASK;
2604 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
2605 uint64_t idx;
2606
2607 /* Entry greater than depth-1 is read-only zero */
2608 if (entry >= depth) {
2609 if (val) {
2610 *val = 0;
2611 }
2612 return 0;
2613 }
2614
2615 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
2616 idx = (idx - entry - 1) & (depth - 1);
2617
2618 if (val) {
2619 *val = env->ctr_data[idx];
2620 }
2621
2622 env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
2623
2624 return 0;
2625 }
2626
2627 static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
2628 target_ulong isel, target_ulong *val,
2629 target_ulong new_val, target_ulong wr_mask)
2630 {
2631 bool virt = false, isel_reserved = false;
2632 int ret = -EINVAL;
2633 uint8_t *iprio;
2634 target_ulong priv, vgein;
2635
2636 /* VS-mode CSR number passed in has already been translated */
2637 switch (csrno) {
2638 case CSR_MIREG:
2639 if (!riscv_cpu_cfg(env)->ext_smaia) {
2640 goto done;
2641 }
2642 iprio = env->miprio;
2643 priv = PRV_M;
2644 break;
2645 case CSR_SIREG:
2646 if (!riscv_cpu_cfg(env)->ext_ssaia ||
2647 (env->priv == PRV_S && env->mvien & MIP_SEIP &&
2648 env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
2649 env->siselect <= ISELECT_IMSIC_EIE63)) {
2650 goto done;
2651 }
2652 iprio = env->siprio;
2653 priv = PRV_S;
2654 break;
2655 case CSR_VSIREG:
2656 if (!riscv_cpu_cfg(env)->ext_ssaia) {
2657 goto done;
2658 }
2659 iprio = env->hviprio;
2660 priv = PRV_S;
2661 virt = true;
2662 break;
2663 default:
2664 goto done;
2665 };
2666
2667 /* Find the selected guest interrupt file */
2668 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2669
2670 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
2671 /* Local interrupt priority registers not available for VS-mode */
2672 if (!virt) {
2673 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
2674 isel, iprio, val, new_val, wr_mask,
2675 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
2676 }
2677 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
2678 /* IMSIC registers only available when machine implements it. */
2679 if (env->aia_ireg_rmw_fn[priv]) {
2680 /* Selected guest interrupt file should not be zero */
2681 if (virt && (!vgein || env->geilen < vgein)) {
2682 goto done;
2683 }
2684 /* Call machine specific IMSIC register emulation */
2685 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2686 AIA_MAKE_IREG(isel, priv, virt, vgein,
2687 riscv_cpu_mxl_bits(env)),
2688 val, new_val, wr_mask);
2689 }
2690 } else {
2691 isel_reserved = true;
2692 }
2693
2694 done:
2695 /*
2696 * If AIA is not enabled, an illegal instruction exception is always
2697 * returned regardless of whether we are in VS-mode or not.
2698 */
2699 if (ret) {
2700 return (env->virt_enabled && virt && !isel_reserved) ?
2701 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2702 }
2703
2704 return RISCV_EXCP_NONE;
2705 }
2706
2707 static int rmw_xireg_cd(CPURISCVState *env, int csrno,
2708 target_ulong isel, target_ulong *val,
2709 target_ulong new_val, target_ulong wr_mask)
2710 {
2711 int ret = -EINVAL;
2712 int ctr_index = isel - ISELECT_CD_FIRST;
2713 int isel_hpm_start = ISELECT_CD_FIRST + 3;
2714
2715 if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
2716 ret = RISCV_EXCP_ILLEGAL_INST;
2717 goto done;
2718 }
2719
2720 /* The siselect value corresponding to ctr_index 1 is reserved and invalid */
2721 if (ctr_index == 1) {
2722 goto done;
2723 }
2724
2725 /* sireg4 and sireg5 provide access to RV32-only CSRs */
2726 if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
2727 (riscv_cpu_mxl(env) != MXL_RV32)) {
2728 ret = RISCV_EXCP_ILLEGAL_INST;
2729 goto done;
2730 }
2731
2732 /* Check Sscofpmf dependency */
2733 if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
2734 (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
2735 goto done;
2736 }
2737
2738 /* Check smcntrpmf dependency */
2739 if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
2740 (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
2741 (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
2742 goto done;
2743 }
2744
2745 if (!get_field(env->mcounteren, BIT(ctr_index)) ||
2746 !get_field(env->menvcfg, MENVCFG_CDE)) {
2747 goto done;
2748 }
2749
2750 switch (csrno) {
2751 case CSR_SIREG:
2752 ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
2753 break;
2754 case CSR_SIREG4:
2755 ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
2756 break;
2757 case CSR_SIREG2:
2758 if (ctr_index <= 2) {
2759 ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
2760 } else {
2761 ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
2762 }
2763 break;
2764 case CSR_SIREG5:
2765 if (ctr_index <= 2) {
2766 ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
2767 } else {
2768 ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
2769 }
2770 break;
2771 default:
2772 goto done;
2773 }
2774
2775 done:
2776 return ret;
2777 }
2778
2779 static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
2780 target_ulong isel, target_ulong *val,
2781 target_ulong new_val, target_ulong wr_mask)
2782 {
2783 if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
2784 return -EINVAL;
2785 }
2786
2787 if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
2788 return rmw_ctrsource(env, isel, val, new_val, wr_mask);
2789 } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
2790 return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
2791 } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
2792 return rmw_ctrdata(env, isel, val, new_val, wr_mask);
2793 } else if (val) {
2794 *val = 0;
2795 }
2796
2797 return 0;
2798 }
2799
2800 /*
2801 * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
2802 *
2803 * Perform indirect access to xireg and xireg2-xireg6.
2804 * This is a generic interface for all xireg CSRs. Apart from AIA, all other
2805 * extensions using csrind should be implemented here.
2806 */
2807 static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
2808 target_ulong isel, target_ulong *val,
2809 target_ulong new_val, target_ulong wr_mask)
2810 {
2811 bool virt = csrno == CSR_VSIREG ? true : false;
2812 int ret = -EINVAL;
2813
2814 if (xiselect_cd_range(isel)) {
2815 ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
2816 } else if (xiselect_ctr_range(csrno, isel)) {
2817 ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
2818 } else {
2819 /*
2820 * As per the specification, access to an unimplemented region is undefined,
2821 * but the recommendation is to raise an illegal instruction exception.
2822 */
2823 return RISCV_EXCP_ILLEGAL_INST;
2824 }
2825
2826 if (ret) {
2827 return (env->virt_enabled && virt) ?
2828 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2829 }
2830
2831 return RISCV_EXCP_NONE;
2832 }
2833
2834 static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
2835 target_ulong new_val, target_ulong wr_mask)
2836 {
2837 int ret = -EINVAL;
2838 target_ulong isel;
2839
2840 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
2841 if (ret != RISCV_EXCP_NONE) {
2842 return ret;
2843 }
2844
2845 /* Translate CSR number for VS-mode */
2846 csrno = csrind_xlate_vs_csrno(env, csrno);
2847
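/*
 * The "csrno != CSR_*IREG4 - 1" checks skip the CSR number that sits
 * between xireg3 and xireg4 in the encoding, which is not allocated to
 * the indirect-register family.
 */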
2848 if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
2849 csrno != CSR_MIREG4 - 1) {
2850 isel = env->miselect;
2851 } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
2852 csrno != CSR_SIREG4 - 1) {
2853 isel = env->siselect;
2854 } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
2855 csrno != CSR_VSIREG4 - 1) {
2856 isel = env->vsiselect;
2857 } else {
2858 return RISCV_EXCP_ILLEGAL_INST;
2859 }
2860
2861 return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
2862 }
2863
2864 static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
2865 target_ulong *val, target_ulong new_val,
2866 target_ulong wr_mask)
2867 {
2868 int ret = -EINVAL;
2869 target_ulong isel;
2870
2871 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
2872 if (ret != RISCV_EXCP_NONE) {
2873 return ret;
2874 }
2875
2876 /* Translate CSR number for VS-mode */
2877 csrno = csrind_xlate_vs_csrno(env, csrno);
2878
2879 /* Decode register details from CSR number */
2880 switch (csrno) {
2881 case CSR_MIREG:
2882 isel = env->miselect;
2883 break;
2884 case CSR_SIREG:
2885 isel = env->siselect;
2886 break;
2887 case CSR_VSIREG:
2888 isel = env->vsiselect;
2889 break;
2890 default:
2891 goto done;
2892 };
2893
2894 /*
2895 * Use the xiselect range to determine actual op on xireg.
2896 *
2897 * Since we only checked the existence of AIA or Indirect Access in the
2898 * predicate, we should check the existence of the exact extension when
2899 * we get to a specific range and return illegal instruction exception even
2900 * in VS-mode.
2901 */
2902 if (xiselect_aia_range(isel)) {
2903 return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
2904 } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
2905 riscv_cpu_cfg(env)->ext_sscsrind) {
2906 return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
2907 }
2908
2909 done:
2910 return RISCV_EXCP_ILLEGAL_INST;
2911 }
2912
2913 static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
2914 target_ulong *val, target_ulong new_val,
2915 target_ulong wr_mask)
2916 {
2917 bool virt;
2918 int ret = -EINVAL;
2919 target_ulong priv, vgein;
2920
2921 /* Translate CSR number for VS-mode */
2922 csrno = aia_xlate_vs_csrno(env, csrno);
2923
2924 /* Decode register details from CSR number */
2925 virt = false;
2926 switch (csrno) {
2927 case CSR_MTOPEI:
2928 priv = PRV_M;
2929 break;
2930 case CSR_STOPEI:
2931 if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
2932 goto done;
2933 }
2934 priv = PRV_S;
2935 break;
2936 case CSR_VSTOPEI:
2937 priv = PRV_S;
2938 virt = true;
2939 break;
2940 default:
2941 goto done;
2942 };
2943
2944 /* IMSIC CSRs only available when machine implements IMSIC. */
2945 if (!env->aia_ireg_rmw_fn[priv]) {
2946 goto done;
2947 }
2948
2949 /* Find the selected guest interrupt file */
2950 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2951
2952 /* Selected guest interrupt file should be valid */
2953 if (virt && (!vgein || env->geilen < vgein)) {
2954 goto done;
2955 }
2956
2957 /* Call machine specific IMSIC register emulation for TOPEI */
2958 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2959 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
2960 riscv_cpu_mxl_bits(env)),
2961 val, new_val, wr_mask);
2962
2963 done:
2964 if (ret) {
2965 return (env->virt_enabled && virt) ?
2966 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2967 }
2968 return RISCV_EXCP_NONE;
2969 }
2970
2971 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2972 target_ulong *val)
2973 {
2974 *val = env->mtvec;
2975 return RISCV_EXCP_NONE;
2976 }
2977
2978 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2979 target_ulong val, uintptr_t ra)
2980 {
2981 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2982 if ((val & 3) < 2) {
2983 env->mtvec = val;
2984 } else {
2985 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2986 }
2987 return RISCV_EXCP_NONE;
2988 }
2989
2990 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2991 target_ulong *val)
2992 {
2993 *val = env->mcountinhibit;
2994 return RISCV_EXCP_NONE;
2995 }
2996
2997 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
2998 target_ulong val, uintptr_t ra)
2999 {
3000 int cidx;
3001 PMUCTRState *counter;
3002 RISCVCPU *cpu = env_archcpu(env);
3003 uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
3004 target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
3005 uint64_t mhpmctr_val, prev_count, curr_count;
3006
3007 /* WARL register - disable unavailable counters; TM bit is always 0 */
3008 env->mcountinhibit = val & present_ctrs;
3009
3010 /* Check if any other counter is also monitoring cycles/instructions */
3011 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
3012 if (!(updated_ctrs & BIT(cidx)) ||
3013 (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
3014 !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
3015 continue;
3016 }
3017
3018 counter = &env->pmu_ctrs[cidx];
3019
3020 if (!get_field(env->mcountinhibit, BIT(cidx))) {
3021 counter->mhpmcounter_prev =
3022 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
3023 if (riscv_cpu_mxl(env) == MXL_RV32) {
3024 counter->mhpmcounterh_prev =
3025 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
3026 }
3027
3028 if (cidx > 2) {
3029 mhpmctr_val = counter->mhpmcounter_val;
3030 if (riscv_cpu_mxl(env) == MXL_RV32) {
3031 mhpmctr_val = mhpmctr_val |
3032 ((uint64_t)counter->mhpmcounterh_val << 32);
3033 }
3034 riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
3035 }
3036 } else {
3037 curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
3038
3039 mhpmctr_val = counter->mhpmcounter_val;
3040 prev_count = counter->mhpmcounter_prev;
3041 if (riscv_cpu_mxl(env) == MXL_RV32) {
3042 uint64_t tmp =
3043 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
3044
3045 curr_count = curr_count | (tmp << 32);
3046 mhpmctr_val = mhpmctr_val |
3047 ((uint64_t)counter->mhpmcounterh_val << 32);
3048 prev_count = prev_count |
3049 ((uint64_t)counter->mhpmcounterh_prev << 32);
3050 }
3051
3052 /* Adjust the counter for later reads. */
3053 mhpmctr_val = curr_count - prev_count + mhpmctr_val;
3054 counter->mhpmcounter_val = mhpmctr_val;
3055 if (riscv_cpu_mxl(env) == MXL_RV32) {
3056 counter->mhpmcounterh_val = mhpmctr_val >> 32;
3057 }
3058 }
3059 }
3060
3061 return RISCV_EXCP_NONE;
3062 }
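/*
 * Worked example (illustrative, hypothetical numbers): when a counter is
 * inhibited again, its value is folded forward so later reads stay
 * consistent. If it was enabled with mhpmcounter_val == 100 and
 * mhpmcounter_prev == 1000, and the fixed counter source now reads 1250:
 *
 *   mhpmctr_val = curr_count - prev_count + mhpmctr_val;
 *   // 1250 - 1000 + 100 == 350 ticks visible on the next read
 */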
3063
3064 static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
3065 target_ulong *val)
3066 {
3067 /* S-mode can only access the bits delegated by M-mode */
3068 *val = env->mcountinhibit & env->mcounteren;
3069 return RISCV_EXCP_NONE;
3070 }
3071
3072 static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
3073 target_ulong val, uintptr_t ra)
3074 {
3075 return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
3076 }
3077
3078 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
3079 target_ulong *val)
3080 {
3081 *val = env->mcounteren;
3082 return RISCV_EXCP_NONE;
3083 }
3084
3085 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
3086 target_ulong val, uintptr_t ra)
3087 {
3088 RISCVCPU *cpu = env_archcpu(env);
3089
3090 /* WARL register - disable unavailable counters */
3091 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3092 COUNTEREN_IR);
3093 return RISCV_EXCP_NONE;
3094 }
3095
3096 /* Machine Trap Handling */
3097 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
3098 Int128 *val)
3099 {
3100 *val = int128_make128(env->mscratch, env->mscratchh);
3101 return RISCV_EXCP_NONE;
3102 }
3103
3104 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
3105 Int128 val)
3106 {
3107 env->mscratch = int128_getlo(val);
3108 env->mscratchh = int128_gethi(val);
3109 return RISCV_EXCP_NONE;
3110 }
3111
3112 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
3113 target_ulong *val)
3114 {
3115 *val = env->mscratch;
3116 return RISCV_EXCP_NONE;
3117 }
3118
3119 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
3120 target_ulong val, uintptr_t ra)
3121 {
3122 env->mscratch = val;
3123 return RISCV_EXCP_NONE;
3124 }
3125
3126 static RISCVException read_mepc(CPURISCVState *env, int csrno,
3127 target_ulong *val)
3128 {
3129 *val = env->mepc;
3130 return RISCV_EXCP_NONE;
3131 }
3132
3133 static RISCVException write_mepc(CPURISCVState *env, int csrno,
3134 target_ulong val, uintptr_t ra)
3135 {
3136 env->mepc = val;
3137 return RISCV_EXCP_NONE;
3138 }
3139
3140 static RISCVException read_mcause(CPURISCVState *env, int csrno,
3141 target_ulong *val)
3142 {
3143 *val = env->mcause;
3144 return RISCV_EXCP_NONE;
3145 }
3146
3147 static RISCVException write_mcause(CPURISCVState *env, int csrno,
3148 target_ulong val, uintptr_t ra)
3149 {
3150 env->mcause = val;
3151 return RISCV_EXCP_NONE;
3152 }
3153
3154 static RISCVException read_mtval(CPURISCVState *env, int csrno,
3155 target_ulong *val)
3156 {
3157 *val = env->mtval;
3158 return RISCV_EXCP_NONE;
3159 }
3160
3161 static RISCVException write_mtval(CPURISCVState *env, int csrno,
3162 target_ulong val, uintptr_t ra)
3163 {
3164 env->mtval = val;
3165 return RISCV_EXCP_NONE;
3166 }
3167
3168 /* Execution environment configuration setup */
3169 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
3170 target_ulong *val)
3171 {
3172 *val = env->menvcfg;
3173 return RISCV_EXCP_NONE;
3174 }
3175
3176 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
3177 target_ulong val, uintptr_t ra);
3178 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
3179 target_ulong val, uintptr_t ra)
3180 {
3181 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3182 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
3183 MENVCFG_CBZE | MENVCFG_CDE;
3184
3185 if (riscv_cpu_mxl(env) == MXL_RV64) {
3186 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
3187 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
3188 (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
3189 (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
3190 (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
3191
3192 if (env_archcpu(env)->cfg.ext_zicfilp) {
3193 mask |= MENVCFG_LPE;
3194 }
3195
3196 if (env_archcpu(env)->cfg.ext_zicfiss) {
3197 mask |= MENVCFG_SSE;
3198 }
3199
3200 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
3201 if (env_archcpu(env)->cfg.ext_smnpm &&
3202 get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
3203 mask |= MENVCFG_PMM;
3204 }
3205
3206 if ((val & MENVCFG_DTE) == 0) {
3207 env->mstatus &= ~MSTATUS_SDT;
3208 }
3209 }
3210 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
3211 return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
3212 }
3213
3214 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
3215 target_ulong *val)
3216 {
3217 *val = env->menvcfg >> 32;
3218 return RISCV_EXCP_NONE;
3219 }
3220
3221 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3222 target_ulong val, uintptr_t ra);
3223 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
3224 target_ulong val, uintptr_t ra)
3225 {
3226 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3227 uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
3228 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
3229 (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
3230 (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
3231 (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
3232 uint64_t valh = (uint64_t)val << 32;
3233
3234 if ((valh & MENVCFG_DTE) == 0) {
3235 env->mstatus &= ~MSTATUS_SDT;
3236 }
3237
3238 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
3239 return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
3240 }
3241
3242 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
3243 target_ulong *val)
3244 {
3245 RISCVException ret;
3246
3247 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3248 if (ret != RISCV_EXCP_NONE) {
3249 return ret;
3250 }
3251
3252 *val = env->senvcfg;
3253 return RISCV_EXCP_NONE;
3254 }
3255
3256 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
3257 target_ulong val, uintptr_t ra)
3258 {
3259 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
3260 RISCVException ret;
3261 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
3262 if (env_archcpu(env)->cfg.ext_ssnpm &&
3263 riscv_cpu_mxl(env) == MXL_RV64 &&
3264 get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
3265 mask |= SENVCFG_PMM;
3266 }
3267
3268 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3269 if (ret != RISCV_EXCP_NONE) {
3270 return ret;
3271 }
3272
3273 if (env_archcpu(env)->cfg.ext_zicfilp) {
3274 mask |= SENVCFG_LPE;
3275 }
3276
3277 /* Higher mode SSE must be ON for next-less mode SSE to be ON */
3278 if (env_archcpu(env)->cfg.ext_zicfiss &&
3279 get_field(env->menvcfg, MENVCFG_SSE) &&
3280 (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
3281 mask |= SENVCFG_SSE;
3282 }
3283
3284 if (env_archcpu(env)->cfg.ext_svukte) {
3285 mask |= SENVCFG_UKTE;
3286 }
3287
3288 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
3289 return RISCV_EXCP_NONE;
3290 }
3291
3292 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
3293 target_ulong *val)
3294 {
3295 RISCVException ret;
3296
3297 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3298 if (ret != RISCV_EXCP_NONE) {
3299 return ret;
3300 }
3301
3302 /*
3303 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
3304 * henvcfg.stce is read_only 0 when menvcfg.stce = 0
3305 * henvcfg.adue is read_only 0 when menvcfg.adue = 0
3306 * henvcfg.dte is read_only 0 when menvcfg.dte = 0
3307 */
3308 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3309 HENVCFG_DTE) | env->menvcfg);
3310 return RISCV_EXCP_NONE;
3311 }
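/*
 * Worked example (illustrative): the mask above makes each listed henvcfg
 * bit read as zero unless the corresponding menvcfg bit is set. With
 * HENVCFG_STCE set in env->henvcfg but MENVCFG_STCE clear in env->menvcfg,
 * the STCE position of (~HENVCFG_STCE | env->menvcfg) is 0, so the bit
 * reads back as 0.
 */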
3312
3313 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
3314 target_ulong val, uintptr_t ra)
3315 {
3316 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
3317 RISCVException ret;
3318
3319 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3320 if (ret != RISCV_EXCP_NONE) {
3321 return ret;
3322 }
3323
3324 if (riscv_cpu_mxl(env) == MXL_RV64) {
3325 mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3326 HENVCFG_DTE);
3327
3328 if (env_archcpu(env)->cfg.ext_zicfilp) {
3329 mask |= HENVCFG_LPE;
3330 }
3331
3332 /* H can light up SSE for VS only if HS had it from menvcfg */
3333 if (env_archcpu(env)->cfg.ext_zicfiss &&
3334 get_field(env->menvcfg, MENVCFG_SSE)) {
3335 mask |= HENVCFG_SSE;
3336 }
3337
3338 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
3339 if (env_archcpu(env)->cfg.ext_ssnpm &&
3340 get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
3341 mask |= HENVCFG_PMM;
3342 }
3343 }
3344
3345 env->henvcfg = val & mask;
3346 if ((env->henvcfg & HENVCFG_DTE) == 0) {
3347 env->vsstatus &= ~MSTATUS_SDT;
3348 }
3349
3350 return RISCV_EXCP_NONE;
3351 }
3352
3353 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
3354 target_ulong *val)
3355 {
3356 RISCVException ret;
3357
3358 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3359 if (ret != RISCV_EXCP_NONE) {
3360 return ret;
3361 }
3362
3363 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3364 HENVCFG_DTE) | env->menvcfg)) >> 32;
3365 return RISCV_EXCP_NONE;
3366 }
3367
3368 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3369 target_ulong val, uintptr_t ra)
3370 {
3371 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
3372 HENVCFG_ADUE | HENVCFG_DTE);
3373 uint64_t valh = (uint64_t)val << 32;
3374 RISCVException ret;
3375
3376 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3377 if (ret != RISCV_EXCP_NONE) {
3378 return ret;
3379 }
3380 env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
3381 if ((env->henvcfg & HENVCFG_DTE) == 0) {
3382 env->vsstatus &= ~MSTATUS_SDT;
3383 }
3384 return RISCV_EXCP_NONE;
3385 }
3386
3387 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
3388 target_ulong *val)
3389 {
3390 *val = env->mstateen[csrno - CSR_MSTATEEN0];
3391
3392 return RISCV_EXCP_NONE;
3393 }
3394
3395 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
3396 uint64_t wr_mask, target_ulong new_val)
3397 {
3398 uint64_t *reg;
3399
3400 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
3401 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3402
3403 return RISCV_EXCP_NONE;
3404 }
3405
3406 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
3407 target_ulong new_val, uintptr_t ra)
3408 {
3409 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3410 if (!riscv_has_ext(env, RVF)) {
3411 wr_mask |= SMSTATEEN0_FCSR;
3412 }
3413
3414 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3415 wr_mask |= SMSTATEEN0_P1P13;
3416 }
3417
3418 if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
3419 wr_mask |= SMSTATEEN0_SVSLCT;
3420 }
3421
3422 /*
3423 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3424 * implemented. However, that information is with MachineState and we can't
3425 * figure that out in csr.c. Just enable if Smaia is available.
3426 */
3427 if (riscv_cpu_cfg(env)->ext_smaia) {
3428 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3429 }
3430
3431 if (riscv_cpu_cfg(env)->ext_ssctr) {
3432 wr_mask |= SMSTATEEN0_CTR;
3433 }
3434
3435 return write_mstateen(env, csrno, wr_mask, new_val);
3436 }
3437
3438 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
3439 target_ulong new_val, uintptr_t ra)
3440 {
3441 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3442 }
3443
3444 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
3445 target_ulong *val)
3446 {
3447 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
3448
3449 return RISCV_EXCP_NONE;
3450 }
3451
3452 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
3453 uint64_t wr_mask, target_ulong new_val)
3454 {
3455 uint64_t *reg, val;
3456
3457 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
3458 val = (uint64_t)new_val << 32;
3459 val |= *reg & 0xFFFFFFFF;
3460 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3461
3462 return RISCV_EXCP_NONE;
3463 }
3464
3465 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
3466 target_ulong new_val, uintptr_t ra)
3467 {
3468 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3469
3470 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3471 wr_mask |= SMSTATEEN0_P1P13;
3472 }
3473
3474 if (riscv_cpu_cfg(env)->ext_ssctr) {
3475 wr_mask |= SMSTATEEN0_CTR;
3476 }
3477
3478 return write_mstateenh(env, csrno, wr_mask, new_val);
3479 }
3480
3481 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
3482 target_ulong new_val, uintptr_t ra)
3483 {
3484 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3485 }
3486
3487 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
3488 target_ulong *val)
3489 {
3490 int index = csrno - CSR_HSTATEEN0;
3491
3492 *val = env->hstateen[index] & env->mstateen[index];
3493
3494 return RISCV_EXCP_NONE;
3495 }
3496
3497 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
3498 uint64_t mask, target_ulong new_val)
3499 {
3500 int index = csrno - CSR_HSTATEEN0;
3501 uint64_t *reg, wr_mask;
3502
3503 reg = &env->hstateen[index];
3504 wr_mask = env->mstateen[index] & mask;
3505 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3506
3507 return RISCV_EXCP_NONE;
3508 }
3509
3510 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
3511 target_ulong new_val, uintptr_t ra)
3512 {
3513 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3514
3515 if (!riscv_has_ext(env, RVF)) {
3516 wr_mask |= SMSTATEEN0_FCSR;
3517 }
3518
3519 if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
3520 wr_mask |= SMSTATEEN0_SVSLCT;
3521 }
3522
3523 /*
3524 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3525 * implemented. However, that information is with MachineState and we can't
3526 * figure that out in csr.c. Just enable if Ssaia is available.
3527 */
3528 if (riscv_cpu_cfg(env)->ext_ssaia) {
3529 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3530 }
3531
3532 if (riscv_cpu_cfg(env)->ext_ssctr) {
3533 wr_mask |= SMSTATEEN0_CTR;
3534 }
3535
3536 return write_hstateen(env, csrno, wr_mask, new_val);
3537 }
3538
3539 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
3540 target_ulong new_val, uintptr_t ra)
3541 {
3542 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3543 }
3544
3545 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
3546 target_ulong *val)
3547 {
3548 int index = csrno - CSR_HSTATEEN0H;
3549
3550 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
3551
3552 return RISCV_EXCP_NONE;
3553 }
3554
3555 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
3556 uint64_t mask, target_ulong new_val)
3557 {
3558 int index = csrno - CSR_HSTATEEN0H;
3559 uint64_t *reg, wr_mask, val;
3560
3561 reg = &env->hstateen[index];
3562 val = (uint64_t)new_val << 32;
3563 val |= *reg & 0xFFFFFFFF;
3564 wr_mask = env->mstateen[index] & mask;
3565 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3566
3567 return RISCV_EXCP_NONE;
3568 }
3569
3570 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
3571 target_ulong new_val, uintptr_t ra)
3572 {
3573 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3574
3575 if (riscv_cpu_cfg(env)->ext_ssctr) {
3576 wr_mask |= SMSTATEEN0_CTR;
3577 }
3578
3579 return write_hstateenh(env, csrno, wr_mask, new_val);
3580 }
3581
3582 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
3583 target_ulong new_val, uintptr_t ra)
3584 {
3585 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3586 }
3587
3588 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
3589 target_ulong *val)
3590 {
3591 bool virt = env->virt_enabled;
3592 int index = csrno - CSR_SSTATEEN0;
3593
3594 *val = env->sstateen[index] & env->mstateen[index];
3595 if (virt) {
3596 *val &= env->hstateen[index];
3597 }
3598
3599 return RISCV_EXCP_NONE;
3600 }
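/*
 * Worked example of the layering above: a bit is visible through sstateen0
 * only if it is also set in mstateen0 and, when V=1, in hstateen0 as well.
 * So with mstateen0.FCSR = 1 and hstateen0.FCSR = 0, a read of sstateen0
 * from a virtualized mode reports FCSR as 0 even if sstateen0.FCSR is set.
 */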
3601
3602 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
3603 uint64_t mask, target_ulong new_val)
3604 {
3605 bool virt = env->virt_enabled;
3606 int index = csrno - CSR_SSTATEEN0;
3607 uint64_t wr_mask;
3608 uint64_t *reg;
3609
3610 wr_mask = env->mstateen[index] & mask;
3611 if (virt) {
3612 wr_mask &= env->hstateen[index];
3613 }
3614
3615 reg = &env->sstateen[index];
3616 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3617
3618 return RISCV_EXCP_NONE;
3619 }
3620
3621 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
3622 target_ulong new_val, uintptr_t ra)
3623 {
3624 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3625
3626 if (!riscv_has_ext(env, RVF)) {
3627 wr_mask |= SMSTATEEN0_FCSR;
3628 }
3629
3630 return write_sstateen(env, csrno, wr_mask, new_val);
3631 }
3632
3633 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
3634 target_ulong new_val, uintptr_t ra)
3635 {
3636 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3637 }
3638
3639 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
3640 uint64_t *ret_val,
3641 uint64_t new_val, uint64_t wr_mask)
3642 {
3643 uint64_t old_mip, mask = wr_mask & delegable_ints;
3644 uint32_t gin;
3645
3646 if (mask & MIP_SEIP) {
3647 env->software_seip = new_val & MIP_SEIP;
3648 new_val |= env->external_seip * MIP_SEIP;
3649 }
3650
3651 if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
3652 get_field(env->menvcfg, MENVCFG_STCE)) {
3653 /* The sstc extension forbids STIP & VSTIP from being writable in mip */
3654 mask = mask & ~(MIP_STIP | MIP_VSTIP);
3655 }
3656
3657 if (mask) {
3658 old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
3659 } else {
3660 old_mip = env->mip;
3661 }
3662
3663 if (csrno != CSR_HVIP) {
3664 gin = get_field(env->hstatus, HSTATUS_VGEIN);
3665 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
3666 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
3667 }
3668
3669 if (ret_val) {
3670 *ret_val = old_mip;
3671 }
3672
3673 return RISCV_EXCP_NONE;
3674 }
3675
3676 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
3677 target_ulong *ret_val,
3678 target_ulong new_val, target_ulong wr_mask)
3679 {
3680 uint64_t rval;
3681 RISCVException ret;
3682
3683 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
3684 if (ret_val) {
3685 *ret_val = rval;
3686 }
3687
3688 return ret;
3689 }
3690
3691 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
3692 target_ulong *ret_val,
3693 target_ulong new_val, target_ulong wr_mask)
3694 {
3695 uint64_t rval;
3696 RISCVException ret;
3697
3698 ret = rmw_mip64(env, csrno, &rval,
3699 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3700 if (ret_val) {
3701 *ret_val = rval >> 32;
3702 }
3703
3704 return ret;
3705 }
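/*
 * The *h variants above implement the RV32 split view of a 64-bit CSR: the
 * guest value and write mask are shifted into bits 63:32 before calling the
 * 64-bit helper, and the returned value is shifted back down. For example,
 * on RV32 a read of miph returns bits 63:32 of the 64-bit mip state.
 */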
3706
3707 /*
3708 * The function is written for two use-cases:
3709 * 1- To access mvip csr as is for m-mode access.
3710 * 2- To access sip as a combination of mip and mvip for s-mode.
3711 *
3712 * Both report bits 1, 5, 9 and 13:63 but with the exception of
3713 * STIP being read-only zero in case of mvip when sstc extension
3714 * is present.
3715 * Also, sip needs to be read-only zero when both mideleg[i] and
3716 * mvien[i] are zero but mvip needs to be an alias of mip.
3717 */
3718 static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
3719 uint64_t *ret_val,
3720 uint64_t new_val, uint64_t wr_mask)
3721 {
3722 RISCVCPU *cpu = env_archcpu(env);
3723 target_ulong ret_mip = 0;
3724 RISCVException ret;
3725 uint64_t old_mvip;
3726
3727 /*
3728 * mideleg[i] mvien[i]
3729 * 0 0 No delegation. mvip[i] is alias of mip[i].
3730 * 0 1 mvip[i] becomes source of interrupt, mip bypassed.
3731 * 1 X mip[i] is source of interrupt and mvip[i] aliases
3732 * mip[i].
3733 *
3734 * So alias condition would be for bits:
3735 * ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
3736 * (!sstc & MIP_STIP)
3737 *
3738 * Non-alias condition will be for bits:
3739 * (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
3740 *
3741 * alias_mask denotes the bits that come from mip. nalias_mask denotes bits
3742 * that come from mvip.
3743 */
3744 uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
3745 (env->mideleg | ~env->mvien)) | MIP_STIP;
3746 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
3747 (~env->mideleg & env->mvien);
3748 uint64_t wr_mask_mvip;
3749 uint64_t wr_mask_mip;
3750
3751 /*
3752 * mideleg[i] mvien[i]
3753 * 0 0 sip[i] read-only zero.
3754 * 0 1 sip[i] alias of mvip[i].
3755 * 1 X sip[i] alias of mip[i].
3756 *
3757 * Both the alias and non-alias masks remain the same for sip except for bits
3758 * which are zero in both mideleg and mvien.
3759 */
3760 if (csrno == CSR_SIP) {
3761 /* Remove bits that are zero in both mideleg and mvien. */
3762 alias_mask &= (env->mideleg | env->mvien);
3763 nalias_mask &= (env->mideleg | env->mvien);
3764 }
3765
3766 /*
3767 * If sstc is present, mvip.STIP is not an alias of mip.STIP, so clear
3768 * that bit in the value returned from mip.
3769 */
3770 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
3771 get_field(env->menvcfg, MENVCFG_STCE)) {
3772 alias_mask &= ~MIP_STIP;
3773 }
3774
3775 wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
3776 wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
3777
3778 /*
3779 * For bits set in alias_mask, mvip needs to be alias of mip, so forward
3780 * this to rmw_mip.
3781 */
3782 ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
3783 if (ret != RISCV_EXCP_NONE) {
3784 return ret;
3785 }
3786
3787 old_mvip = env->mvip;
3788
3789 /*
3790 * Write to mvip. Update only non-alias bits. Alias bits were updated
3791 * in mip in rmw_mip above.
3792 */
3793 if (wr_mask_mvip) {
3794 env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
3795
3796 /*
3797 * Given mvip is a separate source from mip, we need to trigger the interrupt
3798 * from here separately. Normally this happens from riscv_cpu_update_mip.
3799 */
3800 riscv_cpu_interrupt(env);
3801 }
3802
3803 if (ret_val) {
3804 ret_mip &= alias_mask;
3805 old_mvip &= nalias_mask;
3806
3807 *ret_val = old_mvip | ret_mip;
3808 }
3809
3810 return RISCV_EXCP_NONE;
3811 }
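/*
 * Example of the delegation cases handled above: with mideleg[i] = 0 and
 * mvien[i] = 1 the bit falls in nalias_mask, so a write (subject to
 * mvip_writable_mask) lands in env->mvip and riscv_cpu_interrupt() is called
 * by hand; with mideleg[i] = 1 the bit falls in alias_mask and the write is
 * forwarded to mip via rmw_mip() instead.
 */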
3812
3813 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
3814 target_ulong *ret_val,
3815 target_ulong new_val, target_ulong wr_mask)
3816 {
3817 uint64_t rval;
3818 RISCVException ret;
3819
3820 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
3821 if (ret_val) {
3822 *ret_val = rval;
3823 }
3824
3825 return ret;
3826 }
3827
3828 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
3829 target_ulong *ret_val,
3830 target_ulong new_val, target_ulong wr_mask)
3831 {
3832 uint64_t rval;
3833 RISCVException ret;
3834
3835 ret = rmw_mvip64(env, csrno, &rval,
3836 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3837 if (ret_val) {
3838 *ret_val = rval >> 32;
3839 }
3840
3841 return ret;
3842 }
3843
3844 /* Supervisor Trap Setup */
3845 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
3846 Int128 *val)
3847 {
3848 uint64_t mask = sstatus_v1_10_mask;
3849 uint64_t sstatus = env->mstatus & mask;
3850 if (env->xl != MXL_RV32 || env->debugger) {
3851 mask |= SSTATUS64_UXL;
3852 }
3853 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3854 mask |= SSTATUS_SDT;
3855 }
3856
3857 if (env_archcpu(env)->cfg.ext_zicfilp) {
3858 mask |= SSTATUS_SPELP;
3859 }
3860
3861 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
3862 return RISCV_EXCP_NONE;
3863 }
3864
3865 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
3866 target_ulong *val)
3867 {
3868 target_ulong mask = (sstatus_v1_10_mask);
3869 if (env->xl != MXL_RV32 || env->debugger) {
3870 mask |= SSTATUS64_UXL;
3871 }
3872
3873 if (env_archcpu(env)->cfg.ext_zicfilp) {
3874 mask |= SSTATUS_SPELP;
3875 }
3876 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3877 mask |= SSTATUS_SDT;
3878 }
3879 /* TODO: Use SXL not MXL. */
3880 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
3881 return RISCV_EXCP_NONE;
3882 }
3883
3884 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
3885 target_ulong val, uintptr_t ra)
3886 {
3887 target_ulong mask = (sstatus_v1_10_mask);
3888
3889 if (env->xl != MXL_RV32 || env->debugger) {
3890 if ((val & SSTATUS64_UXL) != 0) {
3891 mask |= SSTATUS64_UXL;
3892 }
3893 }
3894
3895 if (env_archcpu(env)->cfg.ext_zicfilp) {
3896 mask |= SSTATUS_SPELP;
3897 }
3898 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3899 mask |= SSTATUS_SDT;
3900 }
3901 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3902 return write_mstatus(env, CSR_MSTATUS, newval, ra);
3903 }
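/*
 * sstatus is a restricted view of mstatus: write_sstatus() merges only the
 * bits in 'mask' into the current mstatus value and then goes through
 * write_mstatus(), so e.g. setting SSTATUS_SIE here updates mstatus.SIE
 * while M-mode-only fields are left untouched.
 */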
3904
3905 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
3906 uint64_t *ret_val,
3907 uint64_t new_val, uint64_t wr_mask)
3908 {
3909 uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
3910 env->hideleg;
3911 uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
3912 uint64_t rval, rval_vs, vsbits;
3913 uint64_t wr_mask_vsie;
3914 uint64_t wr_mask_mie;
3915 RISCVException ret;
3916
3917 /* Bring VS-level bits to correct position */
3918 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
3919 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
3920 new_val |= vsbits << 1;
3921
3922 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
3923 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
3924 wr_mask |= vsbits << 1;
3925
3926 wr_mask_mie = wr_mask & alias_mask;
3927 wr_mask_vsie = wr_mask & nalias_mask;
3928
3929 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);
3930
3931 rval_vs = env->vsie & nalias_mask;
3932 env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);
3933
3934 if (ret_val) {
3935 rval &= alias_mask;
3936 vsbits = rval & VS_MODE_INTERRUPTS;
3937 rval &= ~VS_MODE_INTERRUPTS;
3938 *ret_val = rval | (vsbits >> 1) | rval_vs;
3939 }
3940
3941 return ret;
3942 }
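/*
 * The shifts above implement the guest view of the VS-level bits: in the
 * real mie/mip the VS bits sit at positions 2, 6 and 10 (VSSIP/VSTIP/VSEIP),
 * while vsie/vsip expose them one position lower, at 1, 5 and 9, as
 * SSIP/STIP/SEIP. Incoming values are shifted left by one before touching
 * mie, and results are shifted right by one before being returned.
 */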
3943
3944 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
3945 target_ulong *ret_val,
3946 target_ulong new_val, target_ulong wr_mask)
3947 {
3948 uint64_t rval;
3949 RISCVException ret;
3950
3951 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
3952 if (ret_val) {
3953 *ret_val = rval;
3954 }
3955
3956 return ret;
3957 }
3958
3959 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
3960 target_ulong *ret_val,
3961 target_ulong new_val, target_ulong wr_mask)
3962 {
3963 uint64_t rval;
3964 RISCVException ret;
3965
3966 ret = rmw_vsie64(env, csrno, &rval,
3967 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3968 if (ret_val) {
3969 *ret_val = rval >> 32;
3970 }
3971
3972 return ret;
3973 }
3974
3975 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
3976 uint64_t *ret_val,
3977 uint64_t new_val, uint64_t wr_mask)
3978 {
3979 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
3980 (~env->mideleg & env->mvien);
3981 uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
3982 uint64_t sie_mask = wr_mask & nalias_mask;
3983 RISCVException ret;
3984
3985 /*
3986 * mideleg[i] mvien[i]
3987 * 0 0 sie[i] read-only zero.
3988 * 0 1 sie[i] is a separate writable bit.
3989 * 1 X sie[i] alias of mie[i].
3990 *
3991 * Both the alias and non-alias masks remain the same for sie except for bits
3992 * which are zero in both mideleg and mvien.
3993 */
3994 if (env->virt_enabled) {
3995 if (env->hvictl & HVICTL_VTI) {
3996 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3997 }
3998 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
3999 if (ret_val) {
4000 *ret_val &= alias_mask;
4001 }
4002 } else {
4003 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
4004 if (ret_val) {
4005 *ret_val &= alias_mask;
4006 *ret_val |= env->sie & nalias_mask;
4007 }
4008
4009 env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
4010 }
4011
4012 return ret;
4013 }
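/*
 * Example for the non-virtualized path above: if mideleg[i] = 0 but
 * mvien[i] = 1, sie[i] is not an alias of mie[i]; the bit is kept in the
 * separate env->sie storage and merged into the value returned to the guest,
 * while delegated bits are read and written through rmw_mie64().
 */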
4014
4015 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
4016 target_ulong *ret_val,
4017 target_ulong new_val, target_ulong wr_mask)
4018 {
4019 uint64_t rval;
4020 RISCVException ret;
4021
4022 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
4023 if (ret == RISCV_EXCP_NONE && ret_val) {
4024 *ret_val = rval;
4025 }
4026
4027 return ret;
4028 }
4029
4030 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
4031 target_ulong *ret_val,
4032 target_ulong new_val, target_ulong wr_mask)
4033 {
4034 uint64_t rval;
4035 RISCVException ret;
4036
4037 ret = rmw_sie64(env, csrno, &rval,
4038 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4039 if (ret_val) {
4040 *ret_val = rval >> 32;
4041 }
4042
4043 return ret;
4044 }
4045
4046 static RISCVException read_stvec(CPURISCVState *env, int csrno,
4047 target_ulong *val)
4048 {
4049 *val = env->stvec;
4050 return RISCV_EXCP_NONE;
4051 }
4052
4053 static RISCVException write_stvec(CPURISCVState *env, int csrno,
4054 target_ulong val, uintptr_t ra)
4055 {
4056 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
4057 if ((val & 3) < 2) {
4058 env->stvec = val;
4059 } else {
4060 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
4061 }
4062 return RISCV_EXCP_NONE;
4063 }
4064
4065 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
4066 target_ulong *val)
4067 {
4068 *val = env->scounteren;
4069 return RISCV_EXCP_NONE;
4070 }
4071
4072 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
4073 target_ulong val, uintptr_t ra)
4074 {
4075 RISCVCPU *cpu = env_archcpu(env);
4076
4077 /* WARL register - disable unavailable counters */
4078 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4079 COUNTEREN_IR);
4080 return RISCV_EXCP_NONE;
4081 }
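/*
 * Example of the WARL behaviour above: on a configuration with, say, three
 * implemented hpmcounters, pmu_avail_ctrs only has those counter bits set,
 * so writing ~0 to scounteren keeps CY/TM/IR plus the three implemented
 * counter-enable bits and reads back with every other bit cleared.
 */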
4082
4083 /* Supervisor Trap Handling */
4084 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
4085 Int128 *val)
4086 {
4087 *val = int128_make128(env->sscratch, env->sscratchh);
4088 return RISCV_EXCP_NONE;
4089 }
4090
4091 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
4092 Int128 val)
4093 {
4094 env->sscratch = int128_getlo(val);
4095 env->sscratchh = int128_gethi(val);
4096 return RISCV_EXCP_NONE;
4097 }
4098
4099 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
4100 target_ulong *val)
4101 {
4102 *val = env->sscratch;
4103 return RISCV_EXCP_NONE;
4104 }
4105
4106 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
4107 target_ulong val, uintptr_t ra)
4108 {
4109 env->sscratch = val;
4110 return RISCV_EXCP_NONE;
4111 }
4112
4113 static RISCVException read_sepc(CPURISCVState *env, int csrno,
4114 target_ulong *val)
4115 {
4116 *val = env->sepc;
4117 return RISCV_EXCP_NONE;
4118 }
4119
4120 static RISCVException write_sepc(CPURISCVState *env, int csrno,
4121 target_ulong val, uintptr_t ra)
4122 {
4123 env->sepc = val;
4124 return RISCV_EXCP_NONE;
4125 }
4126
4127 static RISCVException read_scause(CPURISCVState *env, int csrno,
4128 target_ulong *val)
4129 {
4130 *val = env->scause;
4131 return RISCV_EXCP_NONE;
4132 }
4133
4134 static RISCVException write_scause(CPURISCVState *env, int csrno,
4135 target_ulong val, uintptr_t ra)
4136 {
4137 env->scause = val;
4138 return RISCV_EXCP_NONE;
4139 }
4140
4141 static RISCVException read_stval(CPURISCVState *env, int csrno,
4142 target_ulong *val)
4143 {
4144 *val = env->stval;
4145 return RISCV_EXCP_NONE;
4146 }
4147
4148 static RISCVException write_stval(CPURISCVState *env, int csrno,
4149 target_ulong val, uintptr_t ra)
4150 {
4151 env->stval = val;
4152 return RISCV_EXCP_NONE;
4153 }
4154
4155 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
4156 uint64_t *ret_val,
4157 uint64_t new_val, uint64_t wr_mask);
4158
4159 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
4160 uint64_t *ret_val,
4161 uint64_t new_val, uint64_t wr_mask)
4162 {
4163 RISCVException ret;
4164 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
4165 uint64_t vsbits;
4166
4167 /* Add virtualized bits into vsip mask. */
4168 mask |= env->hvien & ~env->hideleg;
4169
4170 /* Bring VS-level bits to correct position */
4171 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
4172 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
4173 new_val |= vsbits << 1;
4174 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
4175 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
4176 wr_mask |= vsbits << 1;
4177
4178 ret = rmw_hvip64(env, csrno, &rval, new_val,
4179 wr_mask & mask & vsip_writable_mask);
4180 if (ret_val) {
4181 rval &= mask;
4182 vsbits = rval & VS_MODE_INTERRUPTS;
4183 rval &= ~VS_MODE_INTERRUPTS;
4184 *ret_val = rval | (vsbits >> 1);
4185 }
4186
4187 return ret;
4188 }
4189
4190 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
4191 target_ulong *ret_val,
4192 target_ulong new_val, target_ulong wr_mask)
4193 {
4194 uint64_t rval;
4195 RISCVException ret;
4196
4197 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
4198 if (ret_val) {
4199 *ret_val = rval;
4200 }
4201
4202 return ret;
4203 }
4204
4205 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
4206 target_ulong *ret_val,
4207 target_ulong new_val, target_ulong wr_mask)
4208 {
4209 uint64_t rval;
4210 RISCVException ret;
4211
4212 ret = rmw_vsip64(env, csrno, &rval,
4213 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4214 if (ret_val) {
4215 *ret_val = rval >> 32;
4216 }
4217
4218 return ret;
4219 }
4220
4221 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
4222 uint64_t *ret_val,
4223 uint64_t new_val, uint64_t wr_mask)
4224 {
4225 RISCVException ret;
4226 uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;
4227
4228 if (env->virt_enabled) {
4229 if (env->hvictl & HVICTL_VTI) {
4230 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
4231 }
4232 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
4233 } else {
4234 ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
4235 }
4236
4237 if (ret_val) {
4238 *ret_val &= (env->mideleg | env->mvien) &
4239 (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
4240 }
4241
4242 return ret;
4243 }
4244
4245 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
4246 target_ulong *ret_val,
4247 target_ulong new_val, target_ulong wr_mask)
4248 {
4249 uint64_t rval;
4250 RISCVException ret;
4251
4252 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
4253 if (ret_val) {
4254 *ret_val = rval;
4255 }
4256
4257 return ret;
4258 }
4259
4260 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
4261 target_ulong *ret_val,
4262 target_ulong new_val, target_ulong wr_mask)
4263 {
4264 uint64_t rval;
4265 RISCVException ret;
4266
4267 ret = rmw_sip64(env, csrno, &rval,
4268 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4269 if (ret_val) {
4270 *ret_val = rval >> 32;
4271 }
4272
4273 return ret;
4274 }
4275
4276 /* Supervisor Protection and Translation */
4277 static RISCVException read_satp(CPURISCVState *env, int csrno,
4278 target_ulong *val)
4279 {
4280 if (!riscv_cpu_cfg(env)->mmu) {
4281 *val = 0;
4282 return RISCV_EXCP_NONE;
4283 }
4284 *val = env->satp;
4285 return RISCV_EXCP_NONE;
4286 }
4287
4288 static RISCVException write_satp(CPURISCVState *env, int csrno,
4289 target_ulong val, uintptr_t ra)
4290 {
4291 if (!riscv_cpu_cfg(env)->mmu) {
4292 return RISCV_EXCP_NONE;
4293 }
4294
4295 env->satp = legalize_xatp(env, env->satp, val);
4296 return RISCV_EXCP_NONE;
4297 }
4298
4299 static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
4300 target_ulong *ret_val,
4301 target_ulong new_val, target_ulong wr_mask)
4302 {
4303 uint64_t mask = wr_mask & SCTRDEPTH_MASK;
4304
4305 if (ret_val) {
4306 *ret_val = env->sctrdepth;
4307 }
4308
4309 env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
4310
4311 /* Correct depth. */
4312 if (mask) {
4313 uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
4314
4315 if (depth > SCTRDEPTH_MAX) {
4316 depth = SCTRDEPTH_MAX;
4317 env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
4318 }
4319
4320 /* Update sctrstatus.WRPTR with a legal value */
4321 depth = 16ULL << depth;
4322 env->sctrstatus =
4323 env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
4324 }
4325
4326 return RISCV_EXCP_NONE;
4327 }
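/*
 * Worked example for the depth handling above: a depth field of 2 encodes
 * 16 << 2 = 64 CTR entries, so sctrstatus.WRPTR is wrapped into the range
 * 0..63; a field larger than SCTRDEPTH_MAX is clamped back to the maximum
 * before the wrap mask is applied.
 */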
4328
4329 static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
4330 target_ulong *ret_val,
4331 target_ulong new_val, target_ulong wr_mask)
4332 {
4333 uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
4334 uint32_t mask = wr_mask & SCTRSTATUS_MASK;
4335
4336 if (ret_val) {
4337 *ret_val = env->sctrstatus;
4338 }
4339
4340 env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
4341
4342 /* Update sctrstatus.WRPTR with a legal value */
4343 env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
4344
4345 return RISCV_EXCP_NONE;
4346 }
4347
4348 static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
4349 target_ulong *ret_val,
4350 target_ulong new_val, target_ulong wr_mask)
4351 {
4352 uint64_t csr_mask, mask = wr_mask;
4353 uint64_t *ctl_ptr = &env->mctrctl;
4354
4355 if (csrno == CSR_MCTRCTL) {
4356 csr_mask = MCTRCTL_MASK;
4357 } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
4358 csr_mask = SCTRCTL_MASK;
4359 } else {
4360 /*
4361 * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
4362 * or csrno == CSR_VSCTRCTL.
4363 */
4364 csr_mask = VSCTRCTL_MASK;
4365 ctl_ptr = &env->vsctrctl;
4366 }
4367
4368 mask &= csr_mask;
4369
4370 if (ret_val) {
4371 *ret_val = *ctl_ptr & csr_mask;
4372 }
4373
4374 *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
4375
4376 return RISCV_EXCP_NONE;
4377 }
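/*
 * Routing example for the above: an access to CSR_SCTRCTL with V=1 (or any
 * access to CSR_VSCTRCTL) is applied to env->vsctrctl under VSCTRCTL_MASK,
 * while CSR_MCTRCTL and a non-virtualized CSR_SCTRCTL access operate on
 * env->mctrctl with their respective masks.
 */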
4378
4379 static RISCVException read_vstopi(CPURISCVState *env, int csrno,
4380 target_ulong *val)
4381 {
4382 int irq, ret;
4383 target_ulong topei;
4384 uint64_t vseip, vsgein;
4385 uint32_t iid, iprio, hviid, hviprio, gein;
4386 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
4387
4388 gein = get_field(env->hstatus, HSTATUS_VGEIN);
4389 hviid = get_field(env->hvictl, HVICTL_IID);
4390 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
4391
4392 if (gein) {
4393 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
4394 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
4395 if (gein <= env->geilen && vseip) {
4396 siid[scount] = IRQ_S_EXT;
4397 siprio[scount] = IPRIO_MMAXIPRIO + 1;
4398 if (env->aia_ireg_rmw_fn[PRV_S]) {
4399 /*
4400 * Call machine specific IMSIC register emulation for
4401 * reading TOPEI.
4402 */
4403 ret = env->aia_ireg_rmw_fn[PRV_S](
4404 env->aia_ireg_rmw_fn_arg[PRV_S],
4405 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
4406 riscv_cpu_mxl_bits(env)),
4407 &topei, 0, 0);
4408 if (!ret && topei) {
4409 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
4410 }
4411 }
4412 scount++;
4413 }
4414 } else {
4415 if (hviid == IRQ_S_EXT && hviprio) {
4416 siid[scount] = IRQ_S_EXT;
4417 siprio[scount] = hviprio;
4418 scount++;
4419 }
4420 }
4421
4422 if (env->hvictl & HVICTL_VTI) {
4423 if (hviid != IRQ_S_EXT) {
4424 siid[scount] = hviid;
4425 siprio[scount] = hviprio;
4426 scount++;
4427 }
4428 } else {
4429 irq = riscv_cpu_vsirq_pending(env);
4430 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
4431 siid[scount] = irq;
4432 siprio[scount] = env->hviprio[irq];
4433 scount++;
4434 }
4435 }
4436
4437 iid = 0;
4438 iprio = UINT_MAX;
4439 for (s = 0; s < scount; s++) {
4440 if (siprio[s] < iprio) {
4441 iid = siid[s];
4442 iprio = siprio[s];
4443 }
4444 }
4445
4446 if (iid) {
4447 if (env->hvictl & HVICTL_IPRIOM) {
4448 if (iprio > IPRIO_MMAXIPRIO) {
4449 iprio = IPRIO_MMAXIPRIO;
4450 }
4451 if (!iprio) {
4452 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
4453 iprio = IPRIO_MMAXIPRIO;
4454 }
4455 }
4456 } else {
4457 iprio = 1;
4458 }
4459 } else {
4460 iprio = 0;
4461 }
4462
4463 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
4464 *val |= iprio;
4465
4466 return RISCV_EXCP_NONE;
4467 }
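/*
 * Selection rule used above: among the candidates collected in siid[] and
 * siprio[], the entry with the numerically lowest priority value wins. When
 * hvictl.IPRIOM is clear the reported priority is simply forced to 1;
 * otherwise it is clamped to IPRIO_MMAXIPRIO.
 */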
4468
4469 static RISCVException read_stopi(CPURISCVState *env, int csrno,
4470 target_ulong *val)
4471 {
4472 int irq;
4473 uint8_t iprio;
4474
4475 if (env->virt_enabled) {
4476 return read_vstopi(env, CSR_VSTOPI, val);
4477 }
4478
4479 irq = riscv_cpu_sirq_pending(env);
4480 if (irq <= 0 || irq > 63) {
4481 *val = 0;
4482 } else {
4483 iprio = env->siprio[irq];
4484 if (!iprio) {
4485 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
4486 iprio = IPRIO_MMAXIPRIO;
4487 }
4488 }
4489 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
4490 *val |= iprio;
4491 }
4492
4493 return RISCV_EXCP_NONE;
4494 }
4495
4496 /* Hypervisor Extensions */
4497 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
4498 target_ulong *val)
4499 {
4500 *val = env->hstatus;
4501 if (riscv_cpu_mxl(env) != MXL_RV32) {
4502 /* We only support 64-bit VSXL */
4503 *val = set_field(*val, HSTATUS_VSXL, 2);
4504 }
4505 /* We only support little endian */
4506 *val = set_field(*val, HSTATUS_VSBE, 0);
4507 return RISCV_EXCP_NONE;
4508 }
4509
4510 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
4511 target_ulong val, uintptr_t ra)
4512 {
4513 uint64_t mask = (target_ulong)-1;
4514 if (!env_archcpu(env)->cfg.ext_svukte) {
4515 mask &= ~HSTATUS_HUKTE;
4516 }
4517 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
4518 if (!env_archcpu(env)->cfg.ext_ssnpm ||
4519 riscv_cpu_mxl(env) != MXL_RV64 ||
4520 get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
4521 mask &= ~HSTATUS_HUPMM;
4522 }
4523 env->hstatus = (env->hstatus & ~mask) | (val & mask);
4524
4525 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
4526 qemu_log_mask(LOG_UNIMP,
4527 "QEMU does not support mixed HSXLEN options.");
4528 }
4529 if (get_field(val, HSTATUS_VSBE) != 0) {
4530 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
4531 }
4532 return RISCV_EXCP_NONE;
4533 }
4534
4535 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
4536 target_ulong *val)
4537 {
4538 *val = env->hedeleg;
4539 return RISCV_EXCP_NONE;
4540 }
4541
4542 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
4543 target_ulong val, uintptr_t ra)
4544 {
4545 env->hedeleg = val & vs_delegable_excps;
4546 return RISCV_EXCP_NONE;
4547 }
4548
4549 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
4550 target_ulong *val)
4551 {
4552 RISCVException ret;
4553 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4554 if (ret != RISCV_EXCP_NONE) {
4555 return ret;
4556 }
4557
4558 /* Reserved, now read zero */
4559 *val = 0;
4560 return RISCV_EXCP_NONE;
4561 }
4562
4563 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
4564 target_ulong val, uintptr_t ra)
4565 {
4566 RISCVException ret;
4567 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4568 if (ret != RISCV_EXCP_NONE) {
4569 return ret;
4570 }
4571
4572 /* Reserved, now write ignore */
4573 return RISCV_EXCP_NONE;
4574 }
4575
4576 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
4577 uint64_t *ret_val,
4578 uint64_t new_val, uint64_t wr_mask)
4579 {
4580 uint64_t mask = wr_mask & hvien_writable_mask;
4581
4582 if (ret_val) {
4583 *ret_val = env->hvien;
4584 }
4585
4586 env->hvien = (env->hvien & ~mask) | (new_val & mask);
4587
4588 return RISCV_EXCP_NONE;
4589 }
4590
4591 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
4592 target_ulong *ret_val,
4593 target_ulong new_val, target_ulong wr_mask)
4594 {
4595 uint64_t rval;
4596 RISCVException ret;
4597
4598 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
4599 if (ret_val) {
4600 *ret_val = rval;
4601 }
4602
4603 return ret;
4604 }
4605
4606 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
4607 target_ulong *ret_val,
4608 target_ulong new_val, target_ulong wr_mask)
4609 {
4610 uint64_t rval;
4611 RISCVException ret;
4612
4613 ret = rmw_hvien64(env, csrno, &rval,
4614 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4615 if (ret_val) {
4616 *ret_val = rval >> 32;
4617 }
4618
4619 return ret;
4620 }
4621
4622 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
4623 uint64_t *ret_val,
4624 uint64_t new_val, uint64_t wr_mask)
4625 {
4626 uint64_t mask = wr_mask & vs_delegable_ints;
4627
4628 if (ret_val) {
4629 *ret_val = env->hideleg & vs_delegable_ints;
4630 }
4631
4632 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
4633 return RISCV_EXCP_NONE;
4634 }
4635
4636 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
4637 target_ulong *ret_val,
4638 target_ulong new_val, target_ulong wr_mask)
4639 {
4640 uint64_t rval;
4641 RISCVException ret;
4642
4643 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
4644 if (ret_val) {
4645 *ret_val = rval;
4646 }
4647
4648 return ret;
4649 }
4650
4651 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
4652 target_ulong *ret_val,
4653 target_ulong new_val, target_ulong wr_mask)
4654 {
4655 uint64_t rval;
4656 RISCVException ret;
4657
4658 ret = rmw_hideleg64(env, csrno, &rval,
4659 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4660 if (ret_val) {
4661 *ret_val = rval >> 32;
4662 }
4663
4664 return ret;
4665 }
4666
4667 /*
4668 * The function is written for two use-cases:
4669 * 1- To access hvip csr as is for HS-mode access.
4670 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
4671 *
4672 * Both report bits 2, 6, 10 and 13:63.
4673 * vsip needs to be read-only zero when both hideleg[i] and
4674 * hvien[i] are zero.
4675 */
4676 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
4677 uint64_t *ret_val,
4678 uint64_t new_val, uint64_t wr_mask)
4679 {
4680 RISCVException ret;
4681 uint64_t old_hvip;
4682 uint64_t ret_mip;
4683
4684 /*
4685 * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
4686 * present in hip, hvip and mip, where mip[i] is an alias of hip[i] and hvip[i]
4687 * is OR'ed into hip[i] to inject virtual interrupts from the hypervisor. These
4688 * bits are actually maintained in mip, so we read them from there.
4689 * This way we have a single source of truth, which allows for an easier
4690 * implementation.
4691 *
4692 * For bits 13:63 we have:
4693 *
4694 * hideleg[i] hvien[i]
4695 * 0 0 No delegation. vsip[i] readonly zero.
4696 * 0 1 vsip[i] is alias of hvip[i], sip bypassed.
4697 * 1 X vsip[i] is alias of sip[i], hvip bypassed.
4698 *
4699 * alias_mask denotes the bits that come from sip (mip here given we
4700 * maintain all bits there). nalias_mask denotes bits that come from
4701 * hvip.
4702 */
4703 uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
4704 uint64_t nalias_mask = (~env->hideleg & env->hvien);
4705 uint64_t wr_mask_hvip;
4706 uint64_t wr_mask_mip;
4707
4708 /*
4709 * Both the alias and non-alias masks remain the same for vsip except:
4710 * 1- For VS* bits if they are zero in hideleg.
4711 * 2- For 13:63 bits if they are zero in both hideleg and hvien.
4712 */
4713 if (csrno == CSR_VSIP) {
4714 /* zero-out VS* bits that are not delegated to VS mode. */
4715 alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);
4716
4717 /*
4718 * zero-out 13:63 bits that are zero in both hideleg and hvien.
4719 * The nalias_mask cannot contain any VS* bits, so only the second
4720 * condition applies to it.
4721 */
4722 nalias_mask &= (env->hideleg | env->hvien);
4723 alias_mask &= (env->hideleg | env->hvien);
4724 }
4725
4726 wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
4727 wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;
4728
4729 /* Aliased bits, bits 10, 6, 2 need to come from mip. */
4730 ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
4731 if (ret != RISCV_EXCP_NONE) {
4732 return ret;
4733 }
4734
4735 old_hvip = env->hvip;
4736
4737 if (wr_mask_hvip) {
4738 env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);
4739
4740 /*
4741 * Given hvip is a separate source from mip, we need to trigger the interrupt
4742 * from here separately. Normally this happens from riscv_cpu_update_mip.
4743 */
4744 riscv_cpu_interrupt(env);
4745 }
4746
4747 if (ret_val) {
4748 /* Only take VS* bits from mip. */
4749 ret_mip &= alias_mask;
4750
4751 /* Take in non-delegated 13:63 bits from hvip. */
4752 old_hvip &= nalias_mask;
4753
4754 *ret_val = ret_mip | old_hvip;
4755 }
4756
4757 return ret;
4758 }
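/*
 * Example of the two paths above: a write to a VS* bit (2, 6 or 10) goes
 * through rmw_mip64() because those bits live in mip, while a write to a
 * bit in 13..63 that is enabled in hvien but not delegated in hideleg is
 * stored in env->hvip and riscv_cpu_interrupt() is invoked explicitly.
 */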
4759
4760 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
4761 target_ulong *ret_val,
4762 target_ulong new_val, target_ulong wr_mask)
4763 {
4764 uint64_t rval;
4765 RISCVException ret;
4766
4767 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
4768 if (ret_val) {
4769 *ret_val = rval;
4770 }
4771
4772 return ret;
4773 }
4774
4775 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
4776 target_ulong *ret_val,
4777 target_ulong new_val, target_ulong wr_mask)
4778 {
4779 uint64_t rval;
4780 RISCVException ret;
4781
4782 ret = rmw_hvip64(env, csrno, &rval,
4783 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4784 if (ret_val) {
4785 *ret_val = rval >> 32;
4786 }
4787
4788 return ret;
4789 }
4790
4791 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
4792 target_ulong *ret_value,
4793 target_ulong new_value, target_ulong write_mask)
4794 {
4795 int ret = rmw_mip(env, csrno, ret_value, new_value,
4796 write_mask & hip_writable_mask);
4797
4798 if (ret_value) {
4799 *ret_value &= HS_MODE_INTERRUPTS;
4800 }
4801 return ret;
4802 }
4803
4804 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
4805 target_ulong *ret_val,
4806 target_ulong new_val, target_ulong wr_mask)
4807 {
4808 uint64_t rval;
4809 RISCVException ret;
4810
4811 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
4812 if (ret_val) {
4813 *ret_val = rval & HS_MODE_INTERRUPTS;
4814 }
4815
4816 return ret;
4817 }
4818
4819 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
4820 target_ulong *val)
4821 {
4822 *val = env->hcounteren;
4823 return RISCV_EXCP_NONE;
4824 }
4825
4826 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
4827 target_ulong val, uintptr_t ra)
4828 {
4829 RISCVCPU *cpu = env_archcpu(env);
4830
4831 /* WARL register - disable unavailable counters */
4832 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4833 COUNTEREN_IR);
4834 return RISCV_EXCP_NONE;
4835 }
4836
4837 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
4838 target_ulong *val)
4839 {
4840 if (val) {
4841 *val = env->hgeie;
4842 }
4843 return RISCV_EXCP_NONE;
4844 }
4845
4846 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
4847 target_ulong val, uintptr_t ra)
4848 {
4849 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
4850 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
4851 env->hgeie = val;
4852 /* Update mip.SGEIP bit */
4853 riscv_cpu_update_mip(env, MIP_SGEIP,
4854 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
4855 return RISCV_EXCP_NONE;
4856 }
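/*
 * Example for the hgeie write above: with geilen = 2 the writable mask is
 * ((1 << 2) - 1) << 1 = 0b110, i.e. only guest external interrupt bits 1
 * and 2 can be set; mip.SGEIP is then raised iff any enabled bit is also
 * pending in hgeip.
 */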
4857
4858 static RISCVException read_htval(CPURISCVState *env, int csrno,
4859 target_ulong *val)
4860 {
4861 *val = env->htval;
4862 return RISCV_EXCP_NONE;
4863 }
4864
4865 static RISCVException write_htval(CPURISCVState *env, int csrno,
4866 target_ulong val, uintptr_t ra)
4867 {
4868 env->htval = val;
4869 return RISCV_EXCP_NONE;
4870 }
4871
4872 static RISCVException read_htinst(CPURISCVState *env, int csrno,
4873 target_ulong *val)
4874 {
4875 *val = env->htinst;
4876 return RISCV_EXCP_NONE;
4877 }
4878
4879 static RISCVException write_htinst(CPURISCVState *env, int csrno,
4880 target_ulong val, uintptr_t ra)
4881 {
4882 return RISCV_EXCP_NONE;
4883 }
4884
4885 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
4886 target_ulong *val)
4887 {
4888 if (val) {
4889 *val = env->hgeip;
4890 }
4891 return RISCV_EXCP_NONE;
4892 }
4893
4894 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
4895 target_ulong *val)
4896 {
4897 *val = env->hgatp;
4898 return RISCV_EXCP_NONE;
4899 }
4900
4901 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
4902 target_ulong val, uintptr_t ra)
4903 {
4904 env->hgatp = legalize_xatp(env, env->hgatp, val);
4905 return RISCV_EXCP_NONE;
4906 }
4907
4908 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
4909 target_ulong *val)
4910 {
4911 if (!env->rdtime_fn) {
4912 return RISCV_EXCP_ILLEGAL_INST;
4913 }
4914
4915 *val = env->htimedelta;
4916 return RISCV_EXCP_NONE;
4917 }
4918
4919 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
4920 target_ulong val, uintptr_t ra)
4921 {
4922 if (!env->rdtime_fn) {
4923 return RISCV_EXCP_ILLEGAL_INST;
4924 }
4925
4926 if (riscv_cpu_mxl(env) == MXL_RV32) {
4927 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
4928 } else {
4929 env->htimedelta = val;
4930 }
4931
4932 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
4933 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
4934 env->htimedelta, MIP_VSTIP);
4935 }
4936
4937 return RISCV_EXCP_NONE;
4938 }
4939
4940 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
4941 target_ulong *val)
4942 {
4943 if (!env->rdtime_fn) {
4944 return RISCV_EXCP_ILLEGAL_INST;
4945 }
4946
4947 *val = env->htimedelta >> 32;
4948 return RISCV_EXCP_NONE;
4949 }
4950
4951 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
4952 target_ulong val, uintptr_t ra)
4953 {
4954 if (!env->rdtime_fn) {
4955 return RISCV_EXCP_ILLEGAL_INST;
4956 }
4957
4958 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
4959
4960 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
4961 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
4962 env->htimedelta, MIP_VSTIP);
4963 }
4964
4965 return RISCV_EXCP_NONE;
4966 }
4967
4968 static RISCVException read_hvictl(CPURISCVState *env, int csrno,
4969 target_ulong *val)
4970 {
4971 *val = env->hvictl;
4972 return RISCV_EXCP_NONE;
4973 }
4974
4975 static RISCVException write_hvictl(CPURISCVState *env, int csrno,
4976 target_ulong val, uintptr_t ra)
4977 {
4978 env->hvictl = val & HVICTL_VALID_MASK;
4979 return RISCV_EXCP_NONE;
4980 }
4981
4982 static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
4983 uint8_t *iprio, target_ulong *val)
4984 {
4985 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
4986
4987 /* First index has to be a multiple of the number of irqs per register */
4988 if (first_index % num_irqs) {
4989 return (env->virt_enabled) ?
4990 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
4991 }
4992
4993 /* Fill-up return value */
4994 *val = 0;
4995 for (i = 0; i < num_irqs; i++) {
4996 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
4997 continue;
4998 }
4999 if (rdzero) {
5000 continue;
5001 }
5002 *val |= ((target_ulong)iprio[irq]) << (i * 8);
5003 }
5004
5005 return RISCV_EXCP_NONE;
5006 }
5007
5008 static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
5009 uint8_t *iprio, target_ulong val)
5010 {
5011 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
5012
5013 /* First index has to be a multiple of the number of irqs per register */
5014 if (first_index % num_irqs) {
5015 return (env->virt_enabled) ?
5016 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
5017 }
5018
5019 /* Fill-up priority array */
5020 for (i = 0; i < num_irqs; i++) {
5021 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
5022 continue;
5023 }
5024 if (rdzero) {
5025 iprio[irq] = 0;
5026 } else {
5027 iprio[irq] = (val >> (i * 8)) & 0xff;
5028 }
5029 }
5030
5031 return RISCV_EXCP_NONE;
5032 }
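/*
 * Layout example for the helpers above: each hviprio* register packs one
 * 8-bit priority per interrupt, num_irqs of them per register (4 on RV32,
 * 8 on RV64). So hviprio1 starts at index 0, hviprio1h at index 4, hviprio2
 * at index 8 and hviprio2h at index 12, matching the callers below.
 */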
5033
5034 static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
5035 target_ulong *val)
5036 {
5037 return read_hvipriox(env, 0, env->hviprio, val);
5038 }
5039
5040 static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
5041 target_ulong val, uintptr_t ra)
5042 {
5043 return write_hvipriox(env, 0, env->hviprio, val);
5044 }
5045
5046 static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
5047 target_ulong *val)
5048 {
5049 return read_hvipriox(env, 4, env->hviprio, val);
5050 }
5051
5052 static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
5053 target_ulong val, uintptr_t ra)
5054 {
5055 return write_hvipriox(env, 4, env->hviprio, val);
5056 }
5057
5058 static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
5059 target_ulong *val)
5060 {
5061 return read_hvipriox(env, 8, env->hviprio, val);
5062 }
5063
5064 static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
5065 target_ulong val, uintptr_t ra)
5066 {
5067 return write_hvipriox(env, 8, env->hviprio, val);
5068 }
5069
5070 static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
5071 target_ulong *val)
5072 {
5073 return read_hvipriox(env, 12, env->hviprio, val);
5074 }
5075
5076 static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
5077 target_ulong val, uintptr_t ra)
5078 {
5079 return write_hvipriox(env, 12, env->hviprio, val);
5080 }
5081
5082 /* Virtual CSR Registers */
5083 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
5084 target_ulong *val)
5085 {
5086 *val = env->vsstatus;
5087 return RISCV_EXCP_NONE;
5088 }
5089
5090 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
5091 target_ulong val, uintptr_t ra)
5092 {
5093 uint64_t mask = (target_ulong)-1;
5094 if ((val & VSSTATUS64_UXL) == 0) {
5095 mask &= ~VSSTATUS64_UXL;
5096 }
5097 if ((env->henvcfg & HENVCFG_DTE)) {
5098 if ((val & SSTATUS_SDT) != 0) {
5099 val &= ~SSTATUS_SIE;
5100 }
5101 } else {
5102 val &= ~SSTATUS_SDT;
5103 }
5104 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
5105 return RISCV_EXCP_NONE;
5106 }
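/*
 * Note on the SDT handling above: with henvcfg.DTE set, a write that sets
 * SSTATUS_SDT also has SSTATUS_SIE cleared from the written value; with DTE
 * clear, the SDT bit itself is masked out of the write.
 */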
5107
5108 static RISCVException read_vstvec(CPURISCVState *env, int csrno,
5109 target_ulong *val)
5110 {
5111 *val = env->vstvec;
5112 return RISCV_EXCP_NONE;
5113 }
5114
5115 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
5116 target_ulong val, uintptr_t ra)
5117 {
5118 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
5119 if ((val & 3) < 2) {
5120 env->vstvec = val;
5121 } else {
5122 qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
5123 }
5124 return RISCV_EXCP_NONE;
5125 }
5126
5127 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
5128 target_ulong *val)
5129 {
5130 *val = env->vsscratch;
5131 return RISCV_EXCP_NONE;
5132 }
5133
5134 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
5135 target_ulong val, uintptr_t ra)
5136 {
5137 env->vsscratch = val;
5138 return RISCV_EXCP_NONE;
5139 }
5140
5141 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
5142 target_ulong *val)
5143 {
5144 *val = env->vsepc;
5145 return RISCV_EXCP_NONE;
5146 }
5147
5148 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
5149 target_ulong val, uintptr_t ra)
5150 {
5151 env->vsepc = val;
5152 return RISCV_EXCP_NONE;
5153 }
5154
5155 static RISCVException read_vscause(CPURISCVState *env, int csrno,
5156 target_ulong *val)
5157 {
5158 *val = env->vscause;
5159 return RISCV_EXCP_NONE;
5160 }
5161
5162 static RISCVException write_vscause(CPURISCVState *env, int csrno,
5163 target_ulong val, uintptr_t ra)
5164 {
5165 env->vscause = val;
5166 return RISCV_EXCP_NONE;
5167 }
5168
5169 static RISCVException read_vstval(CPURISCVState *env, int csrno,
5170 target_ulong *val)
5171 {
5172 *val = env->vstval;
5173 return RISCV_EXCP_NONE;
5174 }
5175
5176 static RISCVException write_vstval(CPURISCVState *env, int csrno,
5177 target_ulong val, uintptr_t ra)
5178 {
5179 env->vstval = val;
5180 return RISCV_EXCP_NONE;
5181 }
5182
5183 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
5184 target_ulong *val)
5185 {
5186 *val = env->vsatp;
5187 return RISCV_EXCP_NONE;
5188 }
5189
5190 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
5191 target_ulong val, uintptr_t ra)
5192 {
5193 env->vsatp = legalize_xatp(env, env->vsatp, val);
5194 return RISCV_EXCP_NONE;
5195 }
5196
5197 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
5198 target_ulong *val)
5199 {
5200 *val = env->mtval2;
5201 return RISCV_EXCP_NONE;
5202 }
5203
5204 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
5205 target_ulong val, uintptr_t ra)
5206 {
5207 env->mtval2 = val;
5208 return RISCV_EXCP_NONE;
5209 }
5210
5211 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
5212 target_ulong *val)
5213 {
5214 *val = env->mtinst;
5215 return RISCV_EXCP_NONE;
5216 }
5217
5218 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
5219 target_ulong val, uintptr_t ra)
5220 {
5221 env->mtinst = val;
5222 return RISCV_EXCP_NONE;
5223 }
5224
5225 /* Physical Memory Protection */
5226 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
5227 target_ulong *val)
5228 {
5229 *val = mseccfg_csr_read(env);
5230 return RISCV_EXCP_NONE;
5231 }
5232
5233 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
5234 target_ulong val, uintptr_t ra)
5235 {
5236 mseccfg_csr_write(env, val);
5237 return RISCV_EXCP_NONE;
5238 }
5239
5240 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
5241 target_ulong *val)
5242 {
5243 uint32_t reg_index = csrno - CSR_PMPCFG0;
5244
5245 *val = pmpcfg_csr_read(env, reg_index);
5246 return RISCV_EXCP_NONE;
5247 }
5248
5249 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
5250 target_ulong val, uintptr_t ra)
5251 {
5252 uint32_t reg_index = csrno - CSR_PMPCFG0;
5253
5254 pmpcfg_csr_write(env, reg_index, val);
5255 return RISCV_EXCP_NONE;
5256 }
5257
5258 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
5259 target_ulong *val)
5260 {
5261 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
5262 return RISCV_EXCP_NONE;
5263 }
5264
5265 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
5266 target_ulong val, uintptr_t ra)
5267 {
5268 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
5269 return RISCV_EXCP_NONE;
5270 }
5271
5272 static RISCVException read_tselect(CPURISCVState *env, int csrno,
5273 target_ulong *val)
5274 {
5275 *val = tselect_csr_read(env);
5276 return RISCV_EXCP_NONE;
5277 }
5278
5279 static RISCVException write_tselect(CPURISCVState *env, int csrno,
5280 target_ulong val, uintptr_t ra)
5281 {
5282 tselect_csr_write(env, val);
5283 return RISCV_EXCP_NONE;
5284 }
5285
5286 static RISCVException read_tdata(CPURISCVState *env, int csrno,
5287 target_ulong *val)
5288 {
5289 /* return 0 in tdata1 to end the trigger enumeration */
5290 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
5291 *val = 0;
5292 return RISCV_EXCP_NONE;
5293 }
5294
5295 if (!tdata_available(env, csrno - CSR_TDATA1)) {
5296 return RISCV_EXCP_ILLEGAL_INST;
5297 }
5298
5299 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
5300 return RISCV_EXCP_NONE;
5301 }
5302
5303 static RISCVException write_tdata(CPURISCVState *env, int csrno,
5304 target_ulong val, uintptr_t ra)
5305 {
5306 if (!tdata_available(env, csrno - CSR_TDATA1)) {
5307 return RISCV_EXCP_ILLEGAL_INST;
5308 }
5309
5310 tdata_csr_write(env, csrno - CSR_TDATA1, val);
5311 return RISCV_EXCP_NONE;
5312 }
5313
5314 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
5315 target_ulong *val)
5316 {
5317 *val = tinfo_csr_read(env);
5318 return RISCV_EXCP_NONE;
5319 }
5320
5321 static RISCVException read_mcontext(CPURISCVState *env, int csrno,
5322 target_ulong *val)
5323 {
5324 *val = env->mcontext;
5325 return RISCV_EXCP_NONE;
5326 }
5327
5328 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
5329 target_ulong val, uintptr_t ra)
5330 {
5331 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
5332 int32_t mask;
5333
5334 if (riscv_has_ext(env, RVH)) {
5335 /* Spec suggests 7-bit for RV32 and 14-bit for RV64 w/ H extension */
5336 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
5337 } else {
5338 /* Spec suggests 6-bit for RV32 and 13-bit for RV64 w/o H extension */
5339 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
5340 }
5341
5342 env->mcontext = val & mask;
5343 return RISCV_EXCP_NONE;
5344 }
5345
5346 static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
5347 target_ulong *val)
5348 {
5349 *val = env->mnscratch;
5350 return RISCV_EXCP_NONE;
5351 }
5352
5353 static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
5354 target_ulong val, uintptr_t ra)
5355 {
5356 env->mnscratch = val;
5357 return RISCV_EXCP_NONE;
5358 }
5359
5360 static RISCVException read_mnepc(CPURISCVState *env, int csrno,
5361 target_ulong *val)
5362 {
5363 *val = env->mnepc;
5364 return RISCV_EXCP_NONE;
5365 }
5366
5367 static RISCVException write_mnepc(CPURISCVState *env, int csrno,
5368 target_ulong val, uintptr_t ra)
5369 {
5370 env->mnepc = val;
5371 return RISCV_EXCP_NONE;
5372 }
5373
5374 static RISCVException read_mncause(CPURISCVState *env, int csrno,
5375 target_ulong *val)
5376 {
5377 *val = env->mncause;
5378 return RISCV_EXCP_NONE;
5379 }
5380
5381 static RISCVException write_mncause(CPURISCVState *env, int csrno,
5382 target_ulong val, uintptr_t ra)
5383 {
5384 env->mncause = val;
5385 return RISCV_EXCP_NONE;
5386 }
5387
5388 static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
5389 target_ulong *val)
5390 {
5391 *val = env->mnstatus;
5392 return RISCV_EXCP_NONE;
5393 }
5394
5395 static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
5396 target_ulong val, uintptr_t ra)
5397 {
5398 target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
5399
5400 if (riscv_has_ext(env, RVH)) {
5401 /* Flush tlb on mnstatus fields that affect VM. */
5402 if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
5403 tlb_flush(env_cpu(env));
5404 }
5405
5406 mask |= MNSTATUS_MNPV;
5407 }
5408
5409 /* mnstatus.mnie can only be cleared by hardware. */
5410 env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
5411 return RISCV_EXCP_NONE;
5412 }
5413
5414 #endif
5415
5416 /* Crypto Extension */
5417 target_ulong riscv_new_csr_seed(target_ulong new_value,
5418 target_ulong write_mask)
5419 {
5420 uint16_t random_v;
5421 Error *random_e = NULL;
5422 int random_r;
5423 target_ulong rval;
5424
5425 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
5426 if (unlikely(random_r < 0)) {
5427 /*
5428 * Failed, for unknown reasons in the crypto subsystem.
5429 * The best we can do is log the reason and return a
5430 * failure indication to the guest. There is no reason
5431 * we know to expect the failure to be transitory, so
5432 * indicate DEAD to avoid having the guest spin on WAIT.
5433 */
5434 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
5435 __func__, error_get_pretty(random_e));
5436 error_free(random_e);
5437 rval = SEED_OPST_DEAD;
5438 } else {
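/*
 * Success: the low 16 bits of the result carry the entropy sample and the
 * OPST field is set to ES16, i.e. 16 bits of entropy are available, which
 * matches the Zkr seed CSR format.
 */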
5439 rval = random_v | SEED_OPST_ES16;
5440 }
5441
5442 return rval;
5443 }
5444
5445 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
5446 target_ulong *ret_value,
5447 target_ulong new_value,
5448 target_ulong write_mask)
5449 {
5450 target_ulong rval;
5451
5452 rval = riscv_new_csr_seed(new_value, write_mask);
5453
5454 if (ret_value) {
5455 *ret_value = rval;
5456 }
5457
5458 return RISCV_EXCP_NONE;
5459 }
5460
5461 /*
5462 * riscv_csrrw - read and/or update control and status register
5463 *
5464 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
5465 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
5466 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
5467 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
5468 */
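/*
 * write_mask selects which bits of new_value are applied on top of the old
 * CSR value: csrrs passes the rs1 value as the mask with new_value = -1 so
 * the selected bits are set, csrrc passes new_value = 0 so they are cleared,
 * and a zero mask turns the access into a pure read.
 */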
5469
5470 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
5471 int csrno,
5472 bool write)
5473 {
5474 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
5475 bool read_only = get_field(csrno, 0xC00) == 3;
5476 int csr_min_priv = csr_ops[csrno].min_priv_ver;
5477
5478 /* ensure the CSR extension is enabled */
5479 if (!riscv_cpu_cfg(env)->ext_zicsr) {
5480 return RISCV_EXCP_ILLEGAL_INST;
5481 }
5482
5483 /* ensure CSR is implemented by checking predicate */
5484 if (!csr_ops[csrno].predicate) {
5485 return RISCV_EXCP_ILLEGAL_INST;
5486 }
5487
5488 /* privileged spec version check */
5489 if (env->priv_ver < csr_min_priv) {
5490 return RISCV_EXCP_ILLEGAL_INST;
5491 }
5492
5493 /* read / write check */
5494 if (write && read_only) {
5495 return RISCV_EXCP_ILLEGAL_INST;
5496 }
5497
5498 /*
5499 * The predicate() not only checks that the CSR exists but also performs
5500 * access control checks which can raise, for example, a virtual instruction
5501 * exception in some cases. When writing a read-only CSR in such a case, an
5502 * illegal instruction exception should be raised instead of a virtual
5503 * instruction exception. Hence this comes after the read / write check.
5504 */
5505 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
5506 if (ret != RISCV_EXCP_NONE) {
5507 return ret;
5508 }
5509
5510 #if !defined(CONFIG_USER_ONLY)
5511 int csr_priv, effective_priv = env->priv;
5512
5513 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
5514 !env->virt_enabled) {
5515 /*
5516 * We are in HS mode. Add 1 to the effective privilege level to
5517 * allow us to access the Hypervisor CSRs.
5518 */
5519 effective_priv++;
5520 }
5521
5522 csr_priv = get_field(csrno, 0x300);
5523 if (!env->debugger && (effective_priv < csr_priv)) {
5524 if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
5525 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
5526 }
5527 return RISCV_EXCP_ILLEGAL_INST;
5528 }
5529 #endif
5530 return RISCV_EXCP_NONE;
5531 }
5532
5533 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
5534 target_ulong *ret_value,
5535 target_ulong new_value,
5536 target_ulong write_mask,
5537 uintptr_t ra)
5538 {
5539 RISCVException ret;
5540 target_ulong old_value = 0;
5541
5542 /* execute combined read/write operation if it exists */
5543 if (csr_ops[csrno].op) {
5544 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
5545 }
5546
5547 /*
5548 * ret_value == NULL means that rd=x0 and we're coming from helper_csrw(),
5549 * so the read is skipped and no CSR read side effects are triggered.
5550 */
5551 if (ret_value) {
5552 /* if no accessor exists then return failure */
5553 if (!csr_ops[csrno].read) {
5554 return RISCV_EXCP_ILLEGAL_INST;
5555 }
5556 /* read old value */
5557 ret = csr_ops[csrno].read(env, csrno, &old_value);
5558 if (ret != RISCV_EXCP_NONE) {
5559 return ret;
5560 }
5561 }
5562
5563 /* write value if writable and write mask set, otherwise drop writes */
5564 if (write_mask) {
5565 new_value = (old_value & ~write_mask) | (new_value & write_mask);
5566 if (csr_ops[csrno].write) {
5567 ret = csr_ops[csrno].write(env, csrno, new_value, ra);
5568 if (ret != RISCV_EXCP_NONE) {
5569 return ret;
5570 }
5571 }
5572 }
5573
5574 /* return old value */
5575 if (ret_value) {
5576 *ret_value = old_value;
5577 }
5578
5579 return RISCV_EXCP_NONE;
5580 }
5581
5582 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
5583 target_ulong *ret_value)
5584 {
5585 RISCVException ret = riscv_csrrw_check(env, csrno, false);
5586 if (ret != RISCV_EXCP_NONE) {
5587 return ret;
5588 }
5589
5590 return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
5591 }
5592
5593 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
5594 target_ulong *ret_value, target_ulong new_value,
5595 target_ulong write_mask, uintptr_t ra)
5596 {
5597 RISCVException ret = riscv_csrrw_check(env, csrno, true);
5598 if (ret != RISCV_EXCP_NONE) {
5599 return ret;
5600 }
5601
5602 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
5603 }
5604
5605 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
5606 Int128 *ret_value,
5607 Int128 new_value,
5608 Int128 write_mask, uintptr_t ra)
5609 {
5610 RISCVException ret;
5611 Int128 old_value;
5612
5613 /* read old value */
5614 ret = csr_ops[csrno].read128(env, csrno, &old_value);
5615 if (ret != RISCV_EXCP_NONE) {
5616 return ret;
5617 }
5618
5619 /* write value if writable and write mask set, otherwise drop writes */
5620 if (int128_nz(write_mask)) {
5621 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
5622 int128_and(new_value, write_mask));
5623 if (csr_ops[csrno].write128) {
5624 ret = csr_ops[csrno].write128(env, csrno, new_value);
5625 if (ret != RISCV_EXCP_NONE) {
5626 return ret;
5627 }
5628 } else if (csr_ops[csrno].write) {
5629 /* avoids having to write wrappers for all registers */
5630 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
5631 if (ret != RISCV_EXCP_NONE) {
5632 return ret;
5633 }
5634 }
5635 }
5636
5637 /* return old value */
5638 if (ret_value) {
5639 *ret_value = old_value;
5640 }
5641
5642 return RISCV_EXCP_NONE;
5643 }
5644
5645 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
5646 Int128 *ret_value)
5647 {
5648 RISCVException ret;
5649
5650 ret = riscv_csrrw_check(env, csrno, false);
5651 if (ret != RISCV_EXCP_NONE) {
5652 return ret;
5653 }
5654
5655 if (csr_ops[csrno].read128) {
5656 return riscv_csrrw_do128(env, csrno, ret_value,
5657 int128_zero(), int128_zero(), 0);
5658 }
5659
5660 /*
5661 * Fall back to the 64-bit version for now if no 128-bit alternative is
5662 * defined at all.
5663 * Note that some CSRs don't need to extend to MXLEN (the upper 64 bits are
5664 * not significant); for those, this fallback handles the accesses
5665 * correctly.
5666 */
5667 target_ulong old_value;
5668 ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
5669 if (ret == RISCV_EXCP_NONE && ret_value) {
5670 *ret_value = int128_make64(old_value);
5671 }
5672 return ret;
5673 }
5674
5675 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
5676 Int128 *ret_value, Int128 new_value,
5677 Int128 write_mask, uintptr_t ra)
5678 {
5679 RISCVException ret;
5680
5681 ret = riscv_csrrw_check(env, csrno, true);
5682 if (ret != RISCV_EXCP_NONE) {
5683 return ret;
5684 }
5685
5686 if (csr_ops[csrno].read128) {
5687 return riscv_csrrw_do128(env, csrno, ret_value,
5688 new_value, write_mask, ra);
5689 }
5690
5691 /*
5692 * Fall back to the 64-bit version for now if no 128-bit alternative is
5693 * defined at all.
5694 * Note that some CSRs don't need to extend to MXLEN (the upper 64 bits are
5695 * not significant); for those, this fallback handles the accesses
5696 * correctly.
5697 */
5698 target_ulong old_value;
5699 ret = riscv_csrrw_do64(env, csrno, &old_value,
5700 int128_getlo(new_value),
5701 int128_getlo(write_mask), ra);
5702 if (ret == RISCV_EXCP_NONE && ret_value) {
5703 *ret_value = int128_make64(old_value);
5704 }
5705 return ret;
5706 }
5707
5708 /*
5709 * Debugger support. If not in user mode, set env->debugger before the
5710 * riscv_csrrw call and clear it after the call.
5711 */
5712 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
5713 target_ulong *ret_value,
5714 target_ulong new_value,
5715 target_ulong write_mask)
5716 {
5717 RISCVException ret;
5718 #if !defined(CONFIG_USER_ONLY)
5719 env->debugger = true;
5720 #endif
5721 if (!write_mask) {
5722 ret = riscv_csrr(env, csrno, ret_value);
5723 } else {
5724 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
5725 }
5726 #if !defined(CONFIG_USER_ONLY)
5727 env->debugger = false;
5728 #endif
5729 return ret;
5730 }
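/*
 * Sketch of a hypothetical debugger-side caller (e.g. a gdbstub CSR
 * accessor); the names here are illustrative only:
 *
 *     target_ulong val;
 *     if (riscv_csrrw_debug(env, csrno, &val, 0, 0) == RISCV_EXCP_NONE) {
 *         // read succeeded; the privilege check in riscv_csrrw_check()
 *         // was relaxed because env->debugger was set for the call
 *     }
 */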
5731
5732 static RISCVException read_jvt(CPURISCVState *env, int csrno,
5733 target_ulong *val)
5734 {
5735 *val = env->jvt;
5736 return RISCV_EXCP_NONE;
5737 }
5738
5739 static RISCVException write_jvt(CPURISCVState *env, int csrno,
5740 target_ulong val, uintptr_t ra)
5741 {
5742 env->jvt = val;
5743 return RISCV_EXCP_NONE;
5744 }
5745
5746 /*
5747 * Control and Status Register function table
5748 * riscv_csr_operations::predicate() must be provided for an implemented CSR
5749 */
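/*
 * Positional entry layout, as used by the initializers below:
 *   { name, predicate, read, write, op (read-modify-write), read128, write128 }
 * with .min_priv_ver set explicitly where a CSR requires a newer
 * privileged-spec version than the default.
 */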
5750 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
5751 /* User Floating-Point CSRs */
5752 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
5753 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
5754 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
5755 /* Vector CSRs */
5756 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
5757 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
5758 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
5759 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
5760 [CSR_VL] = { "vl", vs, read_vl },
5761 [CSR_VTYPE] = { "vtype", vs, read_vtype },
5762 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
5763 /* User Timers and Counters */
5764 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5765 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5766 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5767 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5768
5769 /*
5770 * In privileged mode, the monitor has to emulate the TIME CSRs only if the
5771 * rdtime callback is not provided by the machine/platform emulation.
5772 */
5773 [CSR_TIME] = { "time", ctr, read_time },
5774 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5775
5776 /* Crypto Extension */
5777 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5778
5779 /* Zcmt Extension */
5780 [CSR_JVT] = { "jvt", zcmt, read_jvt, write_jvt },
5781
5782 /* zicfiss Extension, shadow stack register */
5783 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5784
5785 #if !defined(CONFIG_USER_ONLY)
5786 /* Machine Timers and Counters */
5787 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5788 write_mhpmcounter },
5789 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5790 write_mhpmcounter },
5791 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5792 write_mhpmcounterh },
5793 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5794 write_mhpmcounterh },
5795
5796 /* Machine Information Registers */
5797 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5798 [CSR_MARCHID] = { "marchid", any, read_marchid },
5799 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5800 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5801
5802 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5803 .min_priv_ver = PRIV_VERSION_1_12_0 },
5804 /* Machine Trap Setup */
5805 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5806 NULL, read_mstatus_i128 },
5807 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5808 NULL, read_misa_i128 },
5809 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
5810 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
5811 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5812 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5813 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5814 write_mcounteren },
5815
5816 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5817 write_mstatush },
5818 [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
5819 .min_priv_ver = PRIV_VERSION_1_13_0 },
5820 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5821 .min_priv_ver = PRIV_VERSION_1_13_0 },
5822
5823 /* Machine Trap Handling */
5824 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5825 NULL, read_mscratch_i128, write_mscratch_i128 },
5826 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5827 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5828 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5829 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5830
5831 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5832 [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
5833 rmw_xiselect },
5834 [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
5835 rmw_xireg },
5836
5837 /* Machine Indirect Register Alias */
5838 [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
5839 .min_priv_ver = PRIV_VERSION_1_12_0 },
5840 [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
5841 .min_priv_ver = PRIV_VERSION_1_12_0 },
5842 [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
5843 .min_priv_ver = PRIV_VERSION_1_12_0 },
5844 [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
5845 .min_priv_ver = PRIV_VERSION_1_12_0 },
5846 [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
5847 .min_priv_ver = PRIV_VERSION_1_12_0 },
5848
5849 /* Machine-Level Interrupts (AIA) */
5850 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5851 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5852
5853 /* Virtual Interrupts for Supervisor Level (AIA) */
5854 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5855 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5856
5857 /* Machine-Level High-Half CSRs (AIA) */
5858 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
5859 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5860 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5861 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5862 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5863
5864 /* Execution environment configuration */
5865 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5866 .min_priv_ver = PRIV_VERSION_1_12_0 },
5867 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5868 .min_priv_ver = PRIV_VERSION_1_12_0 },
5869 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5870 .min_priv_ver = PRIV_VERSION_1_12_0 },
5871 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5872 .min_priv_ver = PRIV_VERSION_1_12_0 },
5873 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5874 .min_priv_ver = PRIV_VERSION_1_12_0 },
5875
5876 /* Smstateen extension CSRs */
5877 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5878 .min_priv_ver = PRIV_VERSION_1_12_0 },
5879 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5880 write_mstateen0h,
5881 .min_priv_ver = PRIV_VERSION_1_12_0 },
5882 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5883 write_mstateen_1_3,
5884 .min_priv_ver = PRIV_VERSION_1_12_0 },
5885 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5886 write_mstateenh_1_3,
5887 .min_priv_ver = PRIV_VERSION_1_12_0 },
5888 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5889 write_mstateen_1_3,
5890 .min_priv_ver = PRIV_VERSION_1_12_0 },
5891 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5892 write_mstateenh_1_3,
5893 .min_priv_ver = PRIV_VERSION_1_12_0 },
5894 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5895 write_mstateen_1_3,
5896 .min_priv_ver = PRIV_VERSION_1_12_0 },
5897 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5898 write_mstateenh_1_3,
5899 .min_priv_ver = PRIV_VERSION_1_12_0 },
5900 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5901 .min_priv_ver = PRIV_VERSION_1_12_0 },
5902 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5903 write_hstateen0h,
5904 .min_priv_ver = PRIV_VERSION_1_12_0 },
5905 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5906 write_hstateen_1_3,
5907 .min_priv_ver = PRIV_VERSION_1_12_0 },
5908 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5909 write_hstateenh_1_3,
5910 .min_priv_ver = PRIV_VERSION_1_12_0 },
5911 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5912 write_hstateen_1_3,
5913 .min_priv_ver = PRIV_VERSION_1_12_0 },
5914 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5915 write_hstateenh_1_3,
5916 .min_priv_ver = PRIV_VERSION_1_12_0 },
5917 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5918 write_hstateen_1_3,
5919 .min_priv_ver = PRIV_VERSION_1_12_0 },
5920 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5921 write_hstateenh_1_3,
5922 .min_priv_ver = PRIV_VERSION_1_12_0 },
5923 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5924 .min_priv_ver = PRIV_VERSION_1_12_0 },
5925 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5926 write_sstateen_1_3,
5927 .min_priv_ver = PRIV_VERSION_1_12_0 },
5928 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5929 write_sstateen_1_3,
5930 .min_priv_ver = PRIV_VERSION_1_12_0 },
5931 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5932 write_sstateen_1_3,
5933 .min_priv_ver = PRIV_VERSION_1_12_0 },
5934
5935 /* RNMI */
5936 [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
5937 .min_priv_ver = PRIV_VERSION_1_12_0 },
5938 [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
5939 .min_priv_ver = PRIV_VERSION_1_12_0 },
5940 [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
5941 .min_priv_ver = PRIV_VERSION_1_12_0 },
5942 [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
5943 .min_priv_ver = PRIV_VERSION_1_12_0 },
5944
5945 /* Supervisor Counter Delegation */
5946 [CSR_SCOUNTINHIBIT] = { "scountinhibit", scountinhibit_pred,
5947 read_scountinhibit, write_scountinhibit,
5948 .min_priv_ver = PRIV_VERSION_1_12_0 },
5949
5950 /* Supervisor Trap Setup */
5951 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
5952 NULL, read_sstatus_i128 },
5953 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
5954 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
5955 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
5956 write_scounteren },
5957
5958 /* Supervisor Trap Handling */
5959 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
5960 NULL, read_sscratch_i128, write_sscratch_i128 },
5961 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
5962 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
5963 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
5964 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
5965 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
5966 .min_priv_ver = PRIV_VERSION_1_12_0 },
5967 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
5968 .min_priv_ver = PRIV_VERSION_1_12_0 },
5969 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
5970 write_vstimecmp,
5971 .min_priv_ver = PRIV_VERSION_1_12_0 },
5972 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
5973 write_vstimecmph,
5974 .min_priv_ver = PRIV_VERSION_1_12_0 },
5975
5976 /* Supervisor Protection and Translation */
5977 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
5978
5979 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
5980 [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
5981 rmw_xiselect },
5982 [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
5983 rmw_xireg },
5984
5985 /* Supervisor Indirect Register Alias */
5986 [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
5987 .min_priv_ver = PRIV_VERSION_1_12_0 },
5988 [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
5989 .min_priv_ver = PRIV_VERSION_1_12_0 },
5990 [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
5991 .min_priv_ver = PRIV_VERSION_1_12_0 },
5992 [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
5993 .min_priv_ver = PRIV_VERSION_1_12_0 },
5994 [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
5995 .min_priv_ver = PRIV_VERSION_1_12_0 },
5996
5997 /* Supervisor-Level Interrupts (AIA) */
5998 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
5999 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
6000
6001 /* Supervisor-Level High-Half CSRs (AIA) */
6002 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
6003 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
6004
6005 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
6006 .min_priv_ver = PRIV_VERSION_1_12_0 },
6007 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
6008 .min_priv_ver = PRIV_VERSION_1_12_0 },
6009 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
6010 .min_priv_ver = PRIV_VERSION_1_12_0 },
6011 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
6012 .min_priv_ver = PRIV_VERSION_1_12_0 },
6013 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
6014 .min_priv_ver = PRIV_VERSION_1_12_0 },
6015 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
6016 .min_priv_ver = PRIV_VERSION_1_12_0 },
6017 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
6018 write_hcounteren,
6019 .min_priv_ver = PRIV_VERSION_1_12_0 },
6020 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
6021 .min_priv_ver = PRIV_VERSION_1_12_0 },
6022 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
6023 .min_priv_ver = PRIV_VERSION_1_12_0 },
6024 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
6025 .min_priv_ver = PRIV_VERSION_1_12_0 },
6026 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
6027 .min_priv_ver = PRIV_VERSION_1_12_0 },
6028 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
6029 .min_priv_ver = PRIV_VERSION_1_12_0 },
6030 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
6031 write_htimedelta,
6032 .min_priv_ver = PRIV_VERSION_1_12_0 },
6033 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
6034 write_htimedeltah,
6035 .min_priv_ver = PRIV_VERSION_1_12_0 },
6036
6037 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
6038 write_vsstatus,
6039 .min_priv_ver = PRIV_VERSION_1_12_0 },
6040 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
6041 .min_priv_ver = PRIV_VERSION_1_12_0 },
6042 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
6043 .min_priv_ver = PRIV_VERSION_1_12_0 },
6044 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
6045 .min_priv_ver = PRIV_VERSION_1_12_0 },
6046 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
6047 write_vsscratch,
6048 .min_priv_ver = PRIV_VERSION_1_12_0 },
6049 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
6050 .min_priv_ver = PRIV_VERSION_1_12_0 },
6051 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
6052 .min_priv_ver = PRIV_VERSION_1_12_0 },
6053 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
6054 .min_priv_ver = PRIV_VERSION_1_12_0 },
6055 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
6056 .min_priv_ver = PRIV_VERSION_1_12_0 },
6057
6058 [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
6059 .min_priv_ver = PRIV_VERSION_1_12_0 },
6060 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
6061 .min_priv_ver = PRIV_VERSION_1_12_0 },
6062
6063 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
6064 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
6065 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
6066 write_hvictl },
6067 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
6068 write_hviprio1 },
6069 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
6070 write_hviprio2 },
6071 /*
6072 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
6073 */
6074 [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
6075 rmw_xiselect },
6076 [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
6077 rmw_xireg },
6078
6079 /* Virtual Supervisor Indirect Alias */
6080 [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
6081 .min_priv_ver = PRIV_VERSION_1_12_0 },
6082 [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
6083 .min_priv_ver = PRIV_VERSION_1_12_0 },
6084 [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
6085 .min_priv_ver = PRIV_VERSION_1_12_0 },
6086 [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
6087 .min_priv_ver = PRIV_VERSION_1_12_0 },
6088 [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
6089 .min_priv_ver = PRIV_VERSION_1_12_0 },
6090
6091 /* VS-Level Interrupts (H-extension with AIA) */
6092 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
6093 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
6094
6095 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
6096 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
6097 rmw_hidelegh },
6098 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
6099 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
6100 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
6101 write_hviprio1h },
6102 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
6103 write_hviprio2h },
6104 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
6105 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
6106
6107 /* Physical Memory Protection */
6108 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
6109 .min_priv_ver = PRIV_VERSION_1_11_0 },
6110 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
6111 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
6112 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
6113 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
6114 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
6115 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
6116 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
6117 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
6118 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
6119 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
6120 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
6121 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
6122 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
6123 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
6124 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
6125 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
6126 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
6127 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
6128 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
6129 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
6130
6131 /* Debug CSRs */
6132 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
6133 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
6134 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
6135 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
6136 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
6137 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
6138
6139 [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
6140 [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6141 [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6142 [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
6143 [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
6144
6145 /* Performance Counters */
6146 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
6147 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
6148 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
6149 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
6150 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
6151 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
6152 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
6153 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
6154 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
6155 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
6156 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
6157 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
6158 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
6159 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
6160 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
6161 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
6162 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
6163 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
6164 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
6165 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
6166 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
6167 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
6168 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
6169 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
6170 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
6171 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
6172 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
6173 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
6174 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
6175
6176 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
6177 write_mhpmcounter },
6178 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
6179 write_mhpmcounter },
6180 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
6181 write_mhpmcounter },
6182 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
6183 write_mhpmcounter },
6184 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
6185 write_mhpmcounter },
6186 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
6187 write_mhpmcounter },
6188 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
6189 write_mhpmcounter },
6190 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
6191 write_mhpmcounter },
6192 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
6193 write_mhpmcounter },
6194 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
6195 write_mhpmcounter },
6196 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
6197 write_mhpmcounter },
6198 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
6199 write_mhpmcounter },
6200 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
6201 write_mhpmcounter },
6202 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
6203 write_mhpmcounter },
6204 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
6205 write_mhpmcounter },
6206 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
6207 write_mhpmcounter },
6208 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
6209 write_mhpmcounter },
6210 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
6211 write_mhpmcounter },
6212 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
6213 write_mhpmcounter },
6214 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
6215 write_mhpmcounter },
6216 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
6217 write_mhpmcounter },
6218 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
6219 write_mhpmcounter },
6220 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
6221 write_mhpmcounter },
6222 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
6223 write_mhpmcounter },
6224 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
6225 write_mhpmcounter },
6226 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
6227 write_mhpmcounter },
6228 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
6229 write_mhpmcounter },
6230 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
6231 write_mhpmcounter },
6232 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
6233 write_mhpmcounter },
6234
6235 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
6236 write_mcountinhibit,
6237 .min_priv_ver = PRIV_VERSION_1_11_0 },
6238
6239 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
6240 write_mcyclecfg,
6241 .min_priv_ver = PRIV_VERSION_1_12_0 },
6242 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
6243 write_minstretcfg,
6244 .min_priv_ver = PRIV_VERSION_1_12_0 },
6245
6246 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
6247 write_mhpmevent },
6248 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
6249 write_mhpmevent },
6250 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
6251 write_mhpmevent },
6252 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
6253 write_mhpmevent },
6254 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
6255 write_mhpmevent },
6256 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
6257 write_mhpmevent },
6258 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
6259 write_mhpmevent },
6260 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
6261 write_mhpmevent },
6262 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
6263 write_mhpmevent },
6264 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
6265 write_mhpmevent },
6266 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
6267 write_mhpmevent },
6268 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
6269 write_mhpmevent },
6270 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
6271 write_mhpmevent },
6272 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
6273 write_mhpmevent },
6274 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
6275 write_mhpmevent },
6276 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
6277 write_mhpmevent },
6278 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
6279 write_mhpmevent },
6280 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
6281 write_mhpmevent },
6282 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
6283 write_mhpmevent },
6284 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
6285 write_mhpmevent },
6286 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
6287 write_mhpmevent },
6288 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
6289 write_mhpmevent },
6290 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
6291 write_mhpmevent },
6292 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
6293 write_mhpmevent },
6294 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
6295 write_mhpmevent },
6296 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
6297 write_mhpmevent },
6298 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
6299 write_mhpmevent },
6300 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
6301 write_mhpmevent },
6302 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
6303 write_mhpmevent },
6304
6305 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
6306 write_mcyclecfgh,
6307 .min_priv_ver = PRIV_VERSION_1_12_0 },
6308 [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
6309 write_minstretcfgh,
6310 .min_priv_ver = PRIV_VERSION_1_12_0 },
6311
6312 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
6313 write_mhpmeventh,
6314 .min_priv_ver = PRIV_VERSION_1_12_0 },
6315 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
6316 write_mhpmeventh,
6317 .min_priv_ver = PRIV_VERSION_1_12_0 },
6318 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
6319 write_mhpmeventh,
6320 .min_priv_ver = PRIV_VERSION_1_12_0 },
6321 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
6322 write_mhpmeventh,
6323 .min_priv_ver = PRIV_VERSION_1_12_0 },
6324 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
6325 write_mhpmeventh,
6326 .min_priv_ver = PRIV_VERSION_1_12_0 },
6327 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
6328 write_mhpmeventh,
6329 .min_priv_ver = PRIV_VERSION_1_12_0 },
6330 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
6331 write_mhpmeventh,
6332 .min_priv_ver = PRIV_VERSION_1_12_0 },
6333 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
6334 write_mhpmeventh,
6335 .min_priv_ver = PRIV_VERSION_1_12_0 },
6336 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
6337 write_mhpmeventh,
6338 .min_priv_ver = PRIV_VERSION_1_12_0 },
6339 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
6340 write_mhpmeventh,
6341 .min_priv_ver = PRIV_VERSION_1_12_0 },
6342 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
6343 write_mhpmeventh,
6344 .min_priv_ver = PRIV_VERSION_1_12_0 },
6345 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
6346 write_mhpmeventh,
6347 .min_priv_ver = PRIV_VERSION_1_12_0 },
6348 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
6349 write_mhpmeventh,
6350 .min_priv_ver = PRIV_VERSION_1_12_0 },
6351 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
6352 write_mhpmeventh,
6353 .min_priv_ver = PRIV_VERSION_1_12_0 },
6354 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
6355 write_mhpmeventh,
6356 .min_priv_ver = PRIV_VERSION_1_12_0 },
6357 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
6358 write_mhpmeventh,
6359 .min_priv_ver = PRIV_VERSION_1_12_0 },
6360 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
6361 write_mhpmeventh,
6362 .min_priv_ver = PRIV_VERSION_1_12_0 },
6363 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
6364 write_mhpmeventh,
6365 .min_priv_ver = PRIV_VERSION_1_12_0 },
6366 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
6367 write_mhpmeventh,
6368 .min_priv_ver = PRIV_VERSION_1_12_0 },
6369 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
6370 write_mhpmeventh,
6371 .min_priv_ver = PRIV_VERSION_1_12_0 },
6372 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
6373 write_mhpmeventh,
6374 .min_priv_ver = PRIV_VERSION_1_12_0 },
6375 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
6376 write_mhpmeventh,
6377 .min_priv_ver = PRIV_VERSION_1_12_0 },
6378 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
6379 write_mhpmeventh,
6380 .min_priv_ver = PRIV_VERSION_1_12_0 },
6381 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
6382 write_mhpmeventh,
6383 .min_priv_ver = PRIV_VERSION_1_12_0 },
6384 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
6385 write_mhpmeventh,
6386 .min_priv_ver = PRIV_VERSION_1_12_0 },
6387 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
6388 write_mhpmeventh,
6389 .min_priv_ver = PRIV_VERSION_1_12_0 },
6390 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
6391 write_mhpmeventh,
6392 .min_priv_ver = PRIV_VERSION_1_12_0 },
6393 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
6394 write_mhpmeventh,
6395 .min_priv_ver = PRIV_VERSION_1_12_0 },
6396 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
6397 write_mhpmeventh,
6398 .min_priv_ver = PRIV_VERSION_1_12_0 },
6399
6400 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
6401 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
6402 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
6403 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
6404 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
6405 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
6406 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
6407 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
6408 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
6409 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
6410 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
6411 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
6412 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
6413 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
6414 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
6415 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
6416 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
6417 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
6418 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
6419 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
6420 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
6421 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
6422 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
6423 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
6424 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
6425 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
6426 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
6427 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
6428 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
6429
6430 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
6431 write_mhpmcounterh },
6432 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
6433 write_mhpmcounterh },
6434 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
6435 write_mhpmcounterh },
6436 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
6437 write_mhpmcounterh },
6438 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
6439 write_mhpmcounterh },
6440 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
6441 write_mhpmcounterh },
6442 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
6443 write_mhpmcounterh },
6444 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
6445 write_mhpmcounterh },
6446 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
6447 write_mhpmcounterh },
6448 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
6449 write_mhpmcounterh },
6450 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
6451 write_mhpmcounterh },
6452 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
6453 write_mhpmcounterh },
6454 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
6455 write_mhpmcounterh },
6456 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
6457 write_mhpmcounterh },
6458 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
6459 write_mhpmcounterh },
6460 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
6461 write_mhpmcounterh },
6462 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
6463 write_mhpmcounterh },
6464 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
6465 write_mhpmcounterh },
6466 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
6467 write_mhpmcounterh },
6468 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
6469 write_mhpmcounterh },
6470 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
6471 write_mhpmcounterh },
6472 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
6473 write_mhpmcounterh },
6474 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
6475 write_mhpmcounterh },
6476 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
6477 write_mhpmcounterh },
6478 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
6479 write_mhpmcounterh },
6480 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
6481 write_mhpmcounterh },
6482 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
6483 write_mhpmcounterh },
6484 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
6485 write_mhpmcounterh },
6486 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
6487 write_mhpmcounterh },
6488 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
6489 .min_priv_ver = PRIV_VERSION_1_12_0 },
6490
6491 #endif /* !CONFIG_USER_ONLY */
6492 };
6493