xref: /qemu/target/riscv/tcg/tcg-cpu.c (revision 3072961b6edc99abfbd87caac3de29bb58a52ccf)
1 /*
2  * riscv TCG cpu class initialization
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "exec/translation-block.h"
22 #include "tcg-cpu.h"
23 #include "cpu.h"
24 #include "exec/target_page.h"
25 #include "internals.h"
26 #include "pmu.h"
27 #include "time_helper.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/accel.h"
31 #include "qemu/error-report.h"
32 #include "qemu/log.h"
33 #include "accel/accel-cpu-target.h"
34 #include "accel/tcg/cpu-ops.h"
35 #include "tcg/tcg.h"
36 #ifndef CONFIG_USER_ONLY
37 #include "hw/boards.h"
38 #include "system/tcg.h"
39 #include "exec/icount.h"
40 #endif
41 
42 /* Hash that stores user set extensions */
43 static GHashTable *multi_ext_user_opts;
44 static GHashTable *misa_ext_user_opts;
45 
46 static GHashTable *multi_ext_implied_rules;
47 static GHashTable *misa_ext_implied_rules;
48 
cpu_cfg_ext_is_user_set(uint32_t ext_offset)49 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
50 {
51     return g_hash_table_contains(multi_ext_user_opts,
52                                  GUINT_TO_POINTER(ext_offset));
53 }
54 
cpu_misa_ext_is_user_set(uint32_t misa_bit)55 static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
56 {
57     return g_hash_table_contains(misa_ext_user_opts,
58                                  GUINT_TO_POINTER(misa_bit));
59 }
60 
/* Record that the user set this multi-letter extension to 'value'. */
static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    gpointer key = GUINT_TO_POINTER(ext_offset);

    g_hash_table_insert(multi_ext_user_opts, key, (gpointer)value);
}
66 
/* Record that the user set this MISA bit to 'value'. */
static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    gpointer key = GUINT_TO_POINTER(bit);

    g_hash_table_insert(misa_ext_user_opts, key, (gpointer)value);
}
72 
/*
 * Set or clear a single MISA extension bit, keeping misa_ext and
 * misa_ext_mask in sync.
 */
static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;
    uint32_t set = enabled ? bit : 0;

    env->misa_ext = (env->misa_ext & ~bit) | set;
    env->misa_ext_mask = (env->misa_ext_mask & ~bit) | set;
}
86 
/* Translate a priv spec version to its string form; never returns NULL. */
static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *str = priv_spec_to_str(priv_ver);

    /* Callers only pass versions known to priv_spec_to_str(). */
    g_assert(str);

    return str;
}
95 
/* TCGCPUOps hook: forward MMU index selection to the env-level helper. */
static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPURISCVState *env = cpu_env(cs);

    return riscv_env_mmu_index(env, ifetch);
}
100 
/*
 * TCGCPUOps hook: build the (pc, flags) pair that keys the translation
 * block cache.  Everything translation depends on (vector config, FP/vector
 * dirty status, privilege, CFI state, pointer masking) must be folded into
 * 'flags' so distinct CPU configurations never share a TB.
 */
static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
{
    CPURISCVState *env = cpu_env(cs);
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;
    bool pm_signext = riscv_cpu_virt_mem_enabled(env);

    if (cpu->cfg.ext_zve32x) {
        /*
         * If env->vl equals to VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number. The maximum
         * vector size can be operated might be less than 8 bytes,
         * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
         * only when maxsz >= 8 bytes.
         */

        /* lmul encoded as in DisasContext::lmul */
        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
        uint32_t maxsz = vlmax << vsew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        /* No vector support: mark vtype as illegal for translation. */
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

    if (cpu_get_fcfien(env)) {
        /*
         * For Forward CFI, only the expectation of a lpad at
         * the start of the block is tracked via env->elp. env->elp
         * is turned on during jalr translation.
         */
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
    }

    if (cpu_get_bcfien(env)) {
        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
    }

#ifdef CONFIG_USER_ONLY
    /* User mode: FP and vector state are always considered dirty. */
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= riscv_env_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    /* itrigger is incompatible with icount; only track it without icount. */
    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);

    return (TCGTBCPUState){
        /* Truncate the PC to 32 bits when running RV32. */
        .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
        .flags = flags
    };
}
197 
/*
 * TCGCPUOps hook: restore env->pc from a TB's stored pc when TBs are not
 * PC-relative.  With CF_PCREL the TB carries no absolute pc, so there is
 * nothing to synchronize.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    CPURISCVState *env;
    RISCVMXL xl;

    if (tb_cflags(tb) & CF_PCREL) {
        return;
    }

    env = cpu_env(cs);
    xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

    /* RV32 pc values are kept sign-extended, matching (int32_t) truncation. */
    env->pc = (xl == MXL_RV32) ? (int32_t)tb->pc : tb->pc;
}
215 
/*
 * TCGCPUOps hook: rebuild interrupt-relevant CPU state from the per-insn
 * data recorded at translation time (pc, bad instruction bits, uw2).
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    CPURISCVState *env = cpu_env(cs);
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc = data[0];

    if (tb_cflags(tb) & CF_PCREL) {
        /* data[0] only holds the in-page offset; merge in the page base. */
        pc |= env->pc & TARGET_PAGE_MASK;
    }

    env->pc = (xl == MXL_RV32) ? (target_ulong)(int32_t)pc : pc;
    env->bins = data[1];
    env->excp_uw2 = data[2];
}
239 
240 #ifndef CONFIG_USER_ONLY
/*
 * TCGCPUOps hook: wrap a computed address according to the effective XLEN
 * and the pointer-masking (Smmpm/Ssnpm) configuration.
 */
static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
                                vaddr result, vaddr base)
{
    CPURISCVState *env = cpu_env(cs);
    uint32_t pm_len;

    /* RV32 addresses simply truncate to 32 bits. */
    if (cpu_address_xl(env) == MXL_RV32) {
        return (uint32_t)result;
    }

    pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
    if (pm_len == 0) {
        /* Pointer masking disabled: nothing to strip. */
        return result;
    }

    /* Sign- or zero-extend from the unmasked low bits. */
    if (riscv_cpu_virt_mem_enabled(env)) {
        return sextract64(result, 0, 64 - pm_len);
    }
    return extract64(result, 0, 64 - pm_len);
}
263 #endif
264 
/* TCG accelerator hooks for the RISC-V target. */
const TCGCPUOps riscv_tcg_ops = {
    .mttcg_supported = true,
    /* RISC-V RVWMO is weaker than any host; no extra ordering required. */
    .guest_default_memory_order = 0,

    /* Translation entry points. */
    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .get_tb_cpu_state = riscv_get_tb_cpu_state,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    /* System-emulation-only hooks: MMU, interrupts, debug. */
    .tlb_fill = riscv_cpu_tlb_fill,
    .pointer_wrap = riscv_pointer_wrap,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
290 
/*
 * Return the minimum priv spec version that defines the extension at
 * 'ext_offset'.  The offset must belong to a known extension.
 */
static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->min_version;
        }
    }

    g_assert_not_reached();
}
305 
/*
 * Return the user-visible name for the extension or named feature at
 * 'ext_offset'.  The offset must be known to one of the two tables.
 */
static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;
    const RISCVCPUMultiExtConfig *feat;

    /* Multi-letter extensions first, then named features. */
    for (edata = isa_edata_arr; edata->name; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}
325 
cpu_cfg_offset_is_named_feat(uint32_t ext_offset)326 static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
327 {
328     const RISCVCPUMultiExtConfig *feat;
329 
330     for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
331         if (feat->offset == ext_offset) {
332             return true;
333         }
334     }
335 
336     return false;
337 }
338 
/*
 * Apply the side effects of enabling a named feature: set the cfg fields
 * that the feature is shorthand for.
 */
static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
     /*
      * All other named features are already enabled
      * in riscv_tcg_cpu_instance_init().
      */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        /* zic64b == all three cache-block sizes are 64 bytes. */
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        /* sha additionally implies H, unless the user decided about H. */
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        /* Both sha and ssstateen require smstateen. */
        cpu->cfg.ext_smstateen = true;
        break;
    }
}
361 
/*
 * Raise env->priv_ver to at least the minimum version required by the
 * extension at 'ext_offset'.  A priv_ver already at LATEST is left alone.
 */
static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    /*
     * Note: the 'priv_spec' command line option, if present,
     * will take precedence over this priv_ver bump.
     */
    if (ext_priv_ver > env->priv_ver) {
        env->priv_ver = ext_priv_ver;
    }
}
381 
/*
 * Automatically enable/disable an extension as a side effect of another
 * setting, but only when doing so does not contradict the user or the
 * chosen priv spec version.
 */
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    /* Already in the requested state: nothing to do. */
    if (prev_val == value) {
        return;
    }

    /* Explicit user choices always win over automatic updates. */
    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}
407 
riscv_cpu_validate_misa_priv(CPURISCVState * env,Error ** errp)408 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
409 {
410     if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
411         error_setg(errp, "H extension requires priv spec 1.12.0");
412         return;
413     }
414 }
415 
/* Validate the vector configuration (VLEN and ELEN bounds). */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen < 128 || vlen > RV_VLEN_MAX) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen < 8 || cfg->elen > 64) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
    }
}
435 
/*
 * Turn off every enabled extension whose minimum priv spec version is
 * newer than the CPU's, warning the user where appropriate.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}
476 
/*
 * Recompute the named-feature flags from the current priv spec version
 * and extension/cache-block configuration.
 */
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUConfig *cfg = &cpu->cfg;

    if (env->priv_ver >= PRIV_VERSION_1_11_0) {
        cfg->has_priv_1_11 = true;
    }
    if (env->priv_ver >= PRIV_VERSION_1_12_0) {
        cfg->has_priv_1_12 = true;
    }
    if (env->priv_ver >= PRIV_VERSION_1_13_0) {
        cfg->has_priv_1_13 = true;
    }

    /* zic64b means every cache-block size is 64 bytes. */
    cfg->ext_zic64b = (cfg->cbom_blocksize == 64) &&
                      (cfg->cbop_blocksize == 64) &&
                      (cfg->cboz_blocksize == 64);

    cfg->ext_ssstateen = cfg->ext_smstateen;

    /* sha == H plus ssstateen. */
    cfg->ext_sha = riscv_has_ext(env, RVH) && cfg->ext_ssstateen;

    cfg->ext_ziccrse = cfg->has_priv_1_11;
}
502 
/*
 * RVG implies I, M, A, F, D, Zicsr and Zifencei.  Enable what the user
 * left unset; warn (when the user asked for G explicitly) about anything
 * the user disabled themselves.
 */
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);
    int i;

    for (i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (cpu_misa_ext_is_user_set(bit)) {
            /* User explicitly disabled a G mandate: warn, don't override. */
            if (send_warn) {
                warn_report(warn_msg, riscv_get_misa_ext_name(bit));
            }
        } else {
            riscv_cpu_write_misa_bit(cpu, bit, true);
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            if (send_warn) {
                warn_report(warn_msg, "zicsr");
            }
        } else {
            cpu->cfg.ext_zicsr = true;
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            if (send_warn) {
                warn_report(warn_msg, "zifencei");
            }
        } else {
            cpu->cfg.ext_zifencei = true;
        }
    }
}
542 
/*
 * RVB implies Zba, Zbb and Zbs.  Enable whichever the user left unset;
 * warn about any the user disabled explicitly.
 */
static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";
    const struct {
        bool *flag;
        uint32_t offset;
        const char *name;
    } zb_exts[] = {
        { &cpu->cfg.ext_zba, CPU_CFG_OFFSET(ext_zba), "zba" },
        { &cpu->cfg.ext_zbb, CPU_CFG_OFFSET(ext_zbb), "zbb" },
        { &cpu->cfg.ext_zbs, CPU_CFG_OFFSET(ext_zbs), "zbs" },
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(zb_exts); i++) {
        if (*zb_exts[i].flag) {
            continue;
        }

        if (cpu_cfg_ext_is_user_set(zb_exts[i].offset)) {
            warn_report(warn_msg, zb_exts[i].name);
        } else {
            *zb_exts[i].flag = true;
        }
    }
}
571 
/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 *
 * Runs RVG/RVB implication handling first, then validates MISA-level
 * dependencies, floating-point/vector dependencies, Zc*, vector crypto,
 * counters and CFI extensions.  Sets *errp and returns on the first
 * violation; otherwise finishes by disabling any extension whose priv
 * spec requirement the CPU does not meet.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Apply G/B implications before checking dependencies. */
    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    /* MISA base/extension consistency. */
    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    /* Floating-point and atomic dependencies. */
    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    /* Vector configuration and Zve* dependencies. */
    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Zfinx family: FP-in-integer-registers, incompatible with F. */
    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    /* Zc* (code size reduction) dependencies. */
    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /* Vector crypto dependencies. */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    /*
     * Counter extensions: a Zicsr conflict is only an error when the
     * user asked for them; otherwise quietly drop the extension.
     */
    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    /* Shadow-stack (zicfiss) dependencies. */
    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    /* No hpm counters without zihpm. */
    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    /* Smctr/Ssctr: hard error only if user-requested, else drop. */
    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}
839 
840 #ifndef CONFIG_USER_ONLY
/*
 * Return true if the CPU's maximum satp mode satisfies the profile's
 * requirement; optionally warn when it does not.
 */
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = cpu->cfg.max_satp_mode;

    assert(satp_max >= 0);

    if (profile->satp_mode <= satp_max) {
        return true;
    }

    if (send_warn) {
        bool is_32bit = riscv_cpu_is_32bit(cpu);

        warn_report("Profile %s requires satp mode %s, "
                    "but satp mode %s was set", profile->name,
                    satp_mode_str(profile->satp_mode, is_32bit),
                    satp_mode_str(satp_max, is_32bit));
    }

    return false;
}
864 #endif
865 
/*
 * A profile may only remain enabled if its parent profile (when it has
 * one) is enabled too.
 */
static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    if (!parent || !profile->enabled) {
        return;
    }

    profile->enabled = object_property_get_bool(OBJECT(cpu),
                                                parent->name, NULL);
}
881 
/*
 * Decide whether this CPU configuration actually implements 'profile':
 * checks satp mode, priv spec version, mandatory MISA bits and mandatory
 * multi-letter extensions.  Warnings are only emitted when the user
 * explicitly enabled the profile.  Updates profile->enabled in place and
 * then propagates the result through the parent profiles.
 */
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    /* satp is a system-emulation concept only. */
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    /* Every MISA bit the profile mandates must be enabled. */
    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    /* Likewise for every mandated multi-letter extension. */
    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    /* A profile cannot stay enabled if a parent profile is disabled. */
    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}
945 
riscv_cpu_validate_profiles(RISCVCPU * cpu)946 static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
947 {
948     for (int i = 0; riscv_profiles[i] != NULL; i++) {
949         riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
950     }
951 }
952 
riscv_cpu_init_implied_exts_rules(void)953 static void riscv_cpu_init_implied_exts_rules(void)
954 {
955     RISCVCPUImpliedExtsRule *rule;
956 #ifndef CONFIG_USER_ONLY
957     MachineState *ms = MACHINE(qdev_get_machine());
958 #endif
959     static bool initialized;
960     int i;
961 
962     /* Implied rules only need to be initialized once. */
963     if (initialized) {
964         return;
965     }
966 
967     for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
968 #ifndef CONFIG_USER_ONLY
969         rule->enabled = bitmap_new(ms->smp.cpus);
970 #endif
971         g_hash_table_insert(misa_ext_implied_rules,
972                             GUINT_TO_POINTER(rule->ext), (gpointer)rule);
973     }
974 
975     for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
976 #ifndef CONFIG_USER_ONLY
977         rule->enabled = bitmap_new(ms->smp.cpus);
978 #endif
979         g_hash_table_insert(multi_ext_implied_rules,
980                             GUINT_TO_POINTER(rule->ext), (gpointer)rule);
981     }
982 
983     initialized = true;
984 }
985 
cpu_enable_implied_rule(RISCVCPU * cpu,RISCVCPUImpliedExtsRule * rule)986 static void cpu_enable_implied_rule(RISCVCPU *cpu,
987                                     RISCVCPUImpliedExtsRule *rule)
988 {
989     CPURISCVState *env = &cpu->env;
990     RISCVCPUImpliedExtsRule *ir;
991     bool enabled = false;
992     int i;
993 
994 #ifndef CONFIG_USER_ONLY
995     enabled = test_bit(cpu->env.mhartid, rule->enabled);
996 #endif
997 
998     if (!enabled) {
999         /* Enable the implied MISAs. */
1000         if (rule->implied_misa_exts) {
1001             for (i = 0; misa_bits[i] != 0; i++) {
1002                 if (rule->implied_misa_exts & misa_bits[i]) {
1003                     /*
1004                      * If the user disabled the misa_bit do not re-enable it
1005                      * and do not apply any implied rules related to it.
1006                      */
1007                     if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
1008                         !(env->misa_ext & misa_bits[i])) {
1009                         continue;
1010                     }
1011 
1012                     riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
1013                     ir = g_hash_table_lookup(misa_ext_implied_rules,
1014                                              GUINT_TO_POINTER(misa_bits[i]));
1015 
1016                     if (ir) {
1017                         cpu_enable_implied_rule(cpu, ir);
1018                     }
1019                 }
1020             }
1021         }
1022 
1023         /* Enable the implied extensions. */
1024         for (i = 0;
1025              rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
1026             cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);
1027 
1028             ir = g_hash_table_lookup(multi_ext_implied_rules,
1029                                      GUINT_TO_POINTER(
1030                                          rule->implied_multi_exts[i]));
1031 
1032             if (ir) {
1033                 cpu_enable_implied_rule(cpu, ir);
1034             }
1035         }
1036 
1037 #ifndef CONFIG_USER_ONLY
1038         bitmap_set(rule->enabled, cpu->env.mhartid, 1);
1039 #endif
1040     }
1041 }
1042 
1043 /* Zc extension has special implied rules that need to be handled separately. */
cpu_enable_zc_implied_rules(RISCVCPU * cpu)1044 static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
1045 {
1046     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
1047     CPURISCVState *env = &cpu->env;
1048 
1049     if (cpu->cfg.ext_zce) {
1050         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
1051         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
1052         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
1053         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
1054 
1055         if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
1056             cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
1057         }
1058     }
1059 
1060     /* Zca, Zcd and Zcf has a PRIV 1.12.0 restriction */
1061     if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
1062         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
1063 
1064         if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
1065             cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
1066         }
1067 
1068         if (riscv_has_ext(env, RVD)) {
1069             cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
1070         }
1071     }
1072 }
1073 
riscv_cpu_enable_implied_rules(RISCVCPU * cpu)1074 static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
1075 {
1076     RISCVCPUImpliedExtsRule *rule;
1077     int i;
1078 
1079     /* Enable the implied extensions for Zc. */
1080     cpu_enable_zc_implied_rules(cpu);
1081 
1082     /* Enable the implied MISAs. */
1083     for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
1084         if (riscv_has_ext(&cpu->env, rule->ext)) {
1085             cpu_enable_implied_rule(cpu, rule);
1086         }
1087     }
1088 
1089     /* Enable the implied extensions. */
1090     for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
1091         if (isa_ext_is_enabled(cpu, rule->ext)) {
1092             cpu_enable_implied_rule(cpu, rule);
1093         }
1094     }
1095 }
1096 
riscv_tcg_cpu_finalize_features(RISCVCPU * cpu,Error ** errp)1097 void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1098 {
1099     CPURISCVState *env = &cpu->env;
1100     Error *local_err = NULL;
1101 
1102     riscv_cpu_init_implied_exts_rules();
1103     riscv_cpu_enable_implied_rules(cpu);
1104 
1105     riscv_cpu_validate_misa_priv(env, &local_err);
1106     if (local_err != NULL) {
1107         error_propagate(errp, local_err);
1108         return;
1109     }
1110 
1111     riscv_cpu_update_named_features(cpu);
1112     riscv_cpu_validate_profiles(cpu);
1113 
1114     if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
1115         /*
1116          * Enhanced PMP should only be available
1117          * on harts with PMP support
1118          */
1119         error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
1120         return;
1121     }
1122 
1123     riscv_cpu_validate_set_extensions(cpu, &local_err);
1124     if (local_err != NULL) {
1125         error_propagate(errp, local_err);
1126         return;
1127     }
1128 #ifndef CONFIG_USER_ONLY
1129     if (cpu->cfg.pmu_mask) {
1130         riscv_pmu_init(cpu, &local_err);
1131         if (local_err != NULL) {
1132             error_propagate(errp, local_err);
1133             return;
1134         }
1135 
1136         if (cpu->cfg.ext_sscofpmf) {
1137             cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1138                                           riscv_pmu_timer_cb, cpu);
1139         }
1140     }
1141 #endif
1142 }
1143 
riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU * cpu)1144 void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
1145 {
1146     GPtrArray *dynamic_decoders;
1147     dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
1148     for (size_t i = 0; i < decoder_table_size; ++i) {
1149         if (decoder_table[i].guard_func &&
1150             decoder_table[i].guard_func(&cpu->cfg)) {
1151             g_ptr_array_add(dynamic_decoders,
1152                             (gpointer)decoder_table[i].riscv_cpu_decode_fn);
1153         }
1154     }
1155 
1156     cpu->decoders = dynamic_decoders;
1157 }
1158 
riscv_cpu_tcg_compatible(RISCVCPU * cpu)1159 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
1160 {
1161     return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
1162 }
1163 
riscv_cpu_is_generic(Object * cpu_obj)1164 static bool riscv_cpu_is_generic(Object *cpu_obj)
1165 {
1166     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1167 }
1168 
1169 /*
1170  * We'll get here via the following path:
1171  *
1172  * riscv_cpu_realize()
1173  *   -> cpu_exec_realizefn()
1174  *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
1175  */
riscv_tcg_cpu_realize(CPUState * cs,Error ** errp)1176 static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
1177 {
1178     RISCVCPU *cpu = RISCV_CPU(cs);
1179 
1180     if (!riscv_cpu_tcg_compatible(cpu)) {
1181         g_autofree char *name = riscv_cpu_get_name(cpu);
1182         error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
1183                    name);
1184         return false;
1185     }
1186 
1187 #ifndef CONFIG_USER_ONLY
1188     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
1189 
1190     if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
1191         /* Missing 128-bit aligned atomics */
1192         error_setg(errp,
1193                    "128-bit RISC-V currently does not work with Multi "
1194                    "Threaded TCG. Please use: -accel tcg,thread=single");
1195         return false;
1196     }
1197 
1198     CPURISCVState *env = &cpu->env;
1199 
1200     tcg_cflags_set(CPU(cs), CF_PCREL);
1201 
1202     if (cpu->cfg.ext_sstc) {
1203         riscv_timer_init(cpu);
1204     }
1205 
1206     /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
1207     if (riscv_has_ext(env, RVH)) {
1208         env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
1209     }
1210 #endif
1211 
1212     return true;
1213 }
1214 
/* Per-property configuration for a single-letter (MISA) extension. */
typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;  /* MISA bit controlled by the property */
    bool enabled;           /* default value applied to generic CPUs */
} RISCVCPUMisaExtConfig;
1219 
cpu_set_misa_ext_cfg(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1220 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1221                                  void *opaque, Error **errp)
1222 {
1223     const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1224     target_ulong misa_bit = misa_ext_cfg->misa_bit;
1225     RISCVCPU *cpu = RISCV_CPU(obj);
1226     CPURISCVState *env = &cpu->env;
1227     bool vendor_cpu = riscv_cpu_is_vendor(obj);
1228     bool prev_val, value;
1229 
1230     if (!visit_type_bool(v, name, &value, errp)) {
1231         return;
1232     }
1233 
1234     cpu_misa_ext_add_user_opt(misa_bit, value);
1235 
1236     prev_val = env->misa_ext & misa_bit;
1237 
1238     if (value == prev_val) {
1239         return;
1240     }
1241 
1242     if (value) {
1243         if (vendor_cpu) {
1244             g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1245             error_setg(errp, "'%s' CPU does not allow enabling extensions",
1246                        cpuname);
1247             return;
1248         }
1249 
1250         if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
1251             /*
1252              * Note: the 'priv_spec' command line option, if present,
1253              * will take precedence over this priv_ver bump.
1254              */
1255             env->priv_ver = PRIV_VERSION_1_12_0;
1256         }
1257     }
1258 
1259     riscv_cpu_write_misa_bit(cpu, misa_bit, value);
1260 }
1261 
cpu_get_misa_ext_cfg(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1262 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1263                                  void *opaque, Error **errp)
1264 {
1265     const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1266     target_ulong misa_bit = misa_ext_cfg->misa_bit;
1267     RISCVCPU *cpu = RISCV_CPU(obj);
1268     CPURISCVState *env = &cpu->env;
1269     bool value;
1270 
1271     value = env->misa_ext & misa_bit;
1272 
1273     visit_type_bool(v, name, &value, errp);
1274 }
1275 
/* Convenience initializer for misa_ext_cfgs[] entries. */
#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

/*
 * Single-letter (MISA) extensions exposed as CPU properties, with the
 * default value applied to generic CPUs.
 */
static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};
1294 
1295 /*
1296  * We do not support user choice tracking for MISA
1297  * extensions yet because, so far, we do not silently
1298  * change MISA bits during realize() (RVG enables MISA
1299  * bits but the user is warned about it).
1300  */
riscv_cpu_add_misa_properties(Object * cpu_obj)1301 static void riscv_cpu_add_misa_properties(Object *cpu_obj)
1302 {
1303     bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
1304     int i;
1305 
1306     for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
1307         const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
1308         int bit = misa_cfg->misa_bit;
1309         const char *name = riscv_get_misa_ext_name(bit);
1310         const char *desc = riscv_get_misa_ext_description(bit);
1311 
1312         /* Check if KVM already created the property */
1313         if (object_property_find(cpu_obj, name)) {
1314             continue;
1315         }
1316 
1317         object_property_add(cpu_obj, name, "bool",
1318                             cpu_get_misa_ext_cfg,
1319                             cpu_set_misa_ext_cfg,
1320                             NULL, (void *)misa_cfg);
1321         object_property_set_description(cpu_obj, name, desc);
1322         if (use_def_vals) {
1323             riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
1324                                      misa_cfg->enabled);
1325         }
1326     }
1327 }
1328 
cpu_set_profile(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1329 static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
1330                             void *opaque, Error **errp)
1331 {
1332     RISCVCPUProfile *profile = opaque;
1333     RISCVCPU *cpu = RISCV_CPU(obj);
1334     bool value;
1335     int i, ext_offset;
1336 
1337     if (riscv_cpu_is_vendor(obj)) {
1338         error_setg(errp, "Profile %s is not available for vendor CPUs",
1339                    profile->name);
1340         return;
1341     }
1342 
1343     if (cpu->env.misa_mxl != MXL_RV64) {
1344         error_setg(errp, "Profile %s only available for 64 bit CPUs",
1345                    profile->name);
1346         return;
1347     }
1348 
1349     if (!visit_type_bool(v, name, &value, errp)) {
1350         return;
1351     }
1352 
1353     profile->user_set = true;
1354     profile->enabled = value;
1355 
1356     if (profile->u_parent != NULL) {
1357         object_property_set_bool(obj, profile->u_parent->name,
1358                                  profile->enabled, NULL);
1359     }
1360 
1361     if (profile->s_parent != NULL) {
1362         object_property_set_bool(obj, profile->s_parent->name,
1363                                  profile->enabled, NULL);
1364     }
1365 
1366     if (profile->enabled) {
1367         cpu->env.priv_ver = profile->priv_spec;
1368     }
1369 
1370 #ifndef CONFIG_USER_ONLY
1371     if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
1372         object_property_set_bool(obj, "mmu", true, NULL);
1373         const char *satp_prop = satp_mode_str(profile->satp_mode,
1374                                               riscv_cpu_is_32bit(cpu));
1375         object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
1376     }
1377 #endif
1378 
1379     for (i = 0; misa_bits[i] != 0; i++) {
1380         uint32_t bit = misa_bits[i];
1381 
1382         if  (!(profile->misa_ext & bit)) {
1383             continue;
1384         }
1385 
1386         if (bit == RVI && !profile->enabled) {
1387             /*
1388              * Disabling profiles will not disable the base
1389              * ISA RV64I.
1390              */
1391             continue;
1392         }
1393 
1394         cpu_misa_ext_add_user_opt(bit, profile->enabled);
1395         riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
1396     }
1397 
1398     for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
1399         ext_offset = profile->ext_offsets[i];
1400 
1401         if (profile->enabled) {
1402             if (cpu_cfg_offset_is_named_feat(ext_offset)) {
1403                 riscv_cpu_enable_named_feat(cpu, ext_offset);
1404             }
1405 
1406             cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
1407         }
1408 
1409         cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
1410         isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
1411     }
1412 }
1413 
cpu_get_profile(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1414 static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
1415                             void *opaque, Error **errp)
1416 {
1417     RISCVCPUProfile *profile = opaque;
1418     bool value = profile->enabled;
1419 
1420     visit_type_bool(v, name, &value, errp);
1421 }
1422 
riscv_cpu_add_profiles(Object * cpu_obj)1423 static void riscv_cpu_add_profiles(Object *cpu_obj)
1424 {
1425     for (int i = 0; riscv_profiles[i] != NULL; i++) {
1426         const RISCVCPUProfile *profile = riscv_profiles[i];
1427 
1428         object_property_add(cpu_obj, profile->name, "bool",
1429                             cpu_get_profile, cpu_set_profile,
1430                             NULL, (void *)profile);
1431 
1432         /*
1433          * CPUs might enable a profile right from the start.
1434          * Enable its mandatory extensions right away in this
1435          * case.
1436          */
1437         if (profile->enabled) {
1438             object_property_set_bool(cpu_obj, profile->name, true, NULL);
1439         }
1440     }
1441 }
1442 
cpu_ext_is_deprecated(const char * ext_name)1443 static bool cpu_ext_is_deprecated(const char *ext_name)
1444 {
1445     return isupper(ext_name[0]);
1446 }
1447 
1448 /*
1449  * String will be allocated in the heap. Caller is responsible
1450  * for freeing it.
1451  */
cpu_ext_to_lower(const char * ext_name)1452 static char *cpu_ext_to_lower(const char *ext_name)
1453 {
1454     char *ret = g_malloc0(strlen(ext_name) + 1);
1455 
1456     strcpy(ret, ext_name);
1457     ret[0] = tolower(ret[0]);
1458 
1459     return ret;
1460 }
1461 
cpu_set_multi_ext_cfg(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1462 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
1463                                   void *opaque, Error **errp)
1464 {
1465     const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
1466     RISCVCPU *cpu = RISCV_CPU(obj);
1467     bool vendor_cpu = riscv_cpu_is_vendor(obj);
1468     bool prev_val, value;
1469 
1470     if (!visit_type_bool(v, name, &value, errp)) {
1471         return;
1472     }
1473 
1474     if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
1475         g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);
1476 
1477         warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
1478                     multi_ext_cfg->name, lower);
1479     }
1480 
1481     cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);
1482 
1483     prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);
1484 
1485     if (value == prev_val) {
1486         return;
1487     }
1488 
1489     if (value && vendor_cpu) {
1490         g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1491         error_setg(errp, "'%s' CPU does not allow enabling extensions",
1492                    cpuname);
1493         return;
1494     }
1495 
1496     if (value) {
1497         cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
1498     }
1499 
1500     isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
1501 }
1502 
cpu_get_multi_ext_cfg(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1503 static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
1504                                   void *opaque, Error **errp)
1505 {
1506     const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
1507     bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);
1508 
1509     visit_type_bool(v, name, &value, errp);
1510 }
1511 
cpu_add_multi_ext_prop(Object * cpu_obj,const RISCVCPUMultiExtConfig * multi_cfg)1512 static void cpu_add_multi_ext_prop(Object *cpu_obj,
1513                                    const RISCVCPUMultiExtConfig *multi_cfg)
1514 {
1515     bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
1516     bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);
1517 
1518     object_property_add(cpu_obj, multi_cfg->name, "bool",
1519                         cpu_get_multi_ext_cfg,
1520                         cpu_set_multi_ext_cfg,
1521                         NULL, (void *)multi_cfg);
1522 
1523     if (!generic_cpu || deprecated_ext) {
1524         return;
1525     }
1526 
1527     /*
1528      * Set def val directly instead of using
1529      * object_property_set_bool() to save the set()
1530      * callback hash for user inputs.
1531      */
1532     isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
1533                            multi_cfg->enabled);
1534 }
1535 
riscv_cpu_add_multiext_prop_array(Object * obj,const RISCVCPUMultiExtConfig * array)1536 static void riscv_cpu_add_multiext_prop_array(Object *obj,
1537                                         const RISCVCPUMultiExtConfig *array)
1538 {
1539     const RISCVCPUMultiExtConfig *prop;
1540 
1541     g_assert(array);
1542 
1543     for (prop = array; prop && prop->name; prop++) {
1544         cpu_add_multi_ext_prop(obj, prop);
1545     }
1546 }
1547 
1548 /*
1549  * Add CPU properties with user-facing flags.
1550  *
1551  * This will overwrite existing env->misa_ext values with the
1552  * defaults set via riscv_cpu_add_misa_properties().
1553  */
riscv_cpu_add_user_properties(Object * obj)1554 static void riscv_cpu_add_user_properties(Object *obj)
1555 {
1556 #ifndef CONFIG_USER_ONLY
1557     riscv_add_satp_mode_properties(obj);
1558 #endif
1559 
1560     riscv_cpu_add_misa_properties(obj);
1561 
1562     riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
1563     riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
1564     riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);
1565 
1566     riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);
1567 
1568     riscv_cpu_add_profiles(obj);
1569 }
1570 
1571 /*
1572  * The 'max' type CPU will have all possible ratified
1573  * non-vendor extensions enabled.
1574  */
riscv_init_max_cpu_extensions(Object * obj)1575 static void riscv_init_max_cpu_extensions(Object *obj)
1576 {
1577     RISCVCPU *cpu = RISCV_CPU(obj);
1578     CPURISCVState *env = &cpu->env;
1579     const RISCVCPUMultiExtConfig *prop;
1580 
1581     /* Enable RVG and RVV that are disabled by default */
1582     riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
1583 
1584     for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1585         isa_ext_update_enabled(cpu, prop->offset, true);
1586     }
1587 
1588     /*
1589      * Some extensions can't be added without backward compatibilty concerns.
1590      * Disable those, the user can still opt in to them on the command line.
1591      */
1592     cpu->cfg.ext_svade = false;
1593 
1594     /* set vector version */
1595     env->vext_ver = VEXT_VERSION_1_00_0;
1596 
1597     /* Zfinx is not compatible with F. Disable it */
1598     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
1599     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
1600     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
1601     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);
1602 
1603     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
1604     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
1605     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);
1606 
1607     if (env->misa_mxl != MXL_RV32) {
1608         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
1609     }
1610 
1611     /*
1612      * TODO: ext_smrnmi requires OpenSBI changes that our current
1613      * image does not have. Disable it for now.
1614      */
1615     if (cpu->cfg.ext_smrnmi) {
1616         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
1617     }
1618 
1619     /*
1620      * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
1621      * to avoid generating a double trap. OpenSBI does not currently support it,
1622      * disable it for now.
1623      */
1624     if (cpu->cfg.ext_smdbltrp) {
1625         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
1626     }
1627 }
1628 
riscv_cpu_has_max_extensions(Object * cpu_obj)1629 static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
1630 {
1631     return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
1632 }
1633 
riscv_tcg_cpu_instance_init(CPUState * cs)1634 static void riscv_tcg_cpu_instance_init(CPUState *cs)
1635 {
1636     RISCVCPU *cpu = RISCV_CPU(cs);
1637     Object *obj = OBJECT(cpu);
1638 
1639     misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
1640     multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
1641 
1642     if (!misa_ext_implied_rules) {
1643         misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
1644     }
1645 
1646     if (!multi_ext_implied_rules) {
1647         multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
1648     }
1649 
1650     riscv_cpu_add_user_properties(obj);
1651 
1652     if (riscv_cpu_has_max_extensions(obj)) {
1653         riscv_init_max_cpu_extensions(obj);
1654     }
1655 }
1656 
riscv_tcg_cpu_accel_class_init(ObjectClass * oc,const void * data)1657 static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
1658 {
1659     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
1660 
1661     acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
1662     acc->cpu_target_realize = riscv_tcg_cpu_realize;
1663 }
1664 
/* Abstract QOM type binding RISC-V CPU setup to the TCG accelerator. */
static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};
1672 
riscv_tcg_cpu_accel_register_types(void)1673 static void riscv_tcg_cpu_accel_register_types(void)
1674 {
1675     type_register_static(&riscv_tcg_cpu_accel_type_info);
1676 }
1677 type_init(riscv_tcg_cpu_accel_register_types);
1678