xref: /qemu/target/riscv/tcg/tcg-cpu.c (revision e240f6cc25917f3138d9e95e0343ae23b63a3f8c)
/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "system/tcg.h"
#include "exec/icount.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
{
    CPURISCVState *env = cpu_env(cs);
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;
    bool pm_signext = riscv_cpu_virt_mem_enabled(env);

    if (cpu->cfg.ext_zve32x) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL can be a fractional number, the maximum vector
         * size that can be operated on might be less than 8 bytes, which
         * is not supported by GVEC. So we only set the vl_eq_vlmax flag
         * to true when maxsz >= 8 bytes.
         */

        /* lmul encoded as in DisasContext::lmul */
        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
        uint32_t maxsz = vlmax << vsew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
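        /*
         * Worked example of the check above (illustrative only), assuming
         * VLEN = 128 (vlenb = 16): with SEW = 32 (vsew = 2) and LMUL = 1/2
         * (lmul = -1), VLMAX = LMUL * VLEN / SEW = 2 elements, so
         * maxsz = vlmax << vsew = 8 bytes and GVEC expansion is usable.
         * With LMUL = 1/4 the same math gives maxsz = 4 bytes, so
         * vl_eq_vlmax stays false and GVEC is not used.
         */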
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

    if (cpu_get_fcfien(env)) {
        /*
         * For Forward CFI, only the expectation of an lpad at
         * the start of the block is tracked via env->elp. env->elp
         * is turned on during jalr translation.
         */
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
    }

    if (cpu_get_bcfien(env)) {
        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= riscv_env_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
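        /*
         * For example, with the mstatus.FS encoding (0 = OFF, 1 = INITIAL,
         * 2 = CLEAN, 3 = DIRTY): if mstatus_hs.FS is OFF while the guest's
         * mstatus.FS is DIRTY, MIN() yields OFF and FP is treated as
         * disabled for this translation block.
         */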
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);

    return (TCGTBCPUState){
        .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
        .flags = flags
    };
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

#ifndef CONFIG_USER_ONLY
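/*
 * Illustration of the masking below, assuming riscv_pm_get_pmlen() returns
 * the number of ignored upper address bits: with pm_len = 7 on a 64-bit
 * hart, only the low 57 bits of 'result' are kept, sign-extended from
 * bit 56 when virtual memory addressing is in effect and zero-extended
 * otherwise.
 */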
static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
                                vaddr result, vaddr base)
{
    CPURISCVState *env = cpu_env(cs);
    uint32_t pm_len;
    bool pm_signext;

    if (cpu_address_xl(env) == MXL_RV32) {
        return (uint32_t)result;
    }

    pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
    if (pm_len == 0) {
        return result;
    }

    pm_signext = riscv_cpu_virt_mem_enabled(env);
    if (pm_signext) {
        return sextract64(result, 0, 64 - pm_len);
    }
    return extract64(result, 0, 64 - pm_len);
}
#endif

const TCGCPUOps riscv_tcg_ops = {
    .mttcg_supported = true,
    .guest_default_memory_order = 0,

    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .get_tb_cpu_state = riscv_get_tb_cpu_state,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .pointer_wrap = riscv_pointer_wrap,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            /*
             * cpu.debug = true is marked as 'sdtrig', priv spec 1.12.
             * Skip this warning since existing CPUs with older priv
             * spec and debug = true will be impacted.
             */
            if (!strcmp(edata->name, "sdtrig")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    if (cpu->cfg.ext_svrsw60t59b &&
        (!cpu->cfg.mmu || mcc->def->misa_mxl_max == MXL_RV32)) {
        error_setg(errp, "svrsw60t59b is not supported on RV32 and MMU-less platforms");
        return;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
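/*
 * Example of the check below: a profile that mandates sv48 is reported as
 * not implemented (with a warning if the user enabled the profile) on a
 * hart whose cfg.max_satp_mode is capped at sv39.
 */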
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = cpu->cfg.max_satp_mode;

    assert(satp_max >= 0);
    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

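/*
 * A profile is only reported as present when its parent profiles are also
 * present. For instance, assuming the current profile definitions, RVA22S64
 * lists RVA22U64 as its u_parent, so RVA22S64 is cleared whenever RVA22U64
 * is not fully implemented.
 */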
static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    if (!profile->present || !parent) {
        return;
    }

    profile->present = parent->present;
}

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->present = profile_impl;

    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

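/*
 * Recursively enable everything a rule implies. As a sketch of the intent
 * (see riscv_misa_ext_implied_rules/riscv_multi_ext_implied_rules for the
 * actual tables): enabling V can pull in Zve64d via its rule, Zve64d's own
 * rule can then pull in Zve64f, and so on until a rule with no further
 * implications is reached.
 */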
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

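/*
 * Build the per-CPU list of optional decoder functions. As an illustration
 * (assuming the current decoder_table contents), a vendor decoder such as
 * the XThead one is only appended when its guard_func() reports the
 * matching vendor extensions as enabled in cpu->cfg.
 */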
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_set_profile(RISCVCPU *cpu,
                                  RISCVCPUProfile *profile,
                                  bool enabled)
{
    int i, ext_offset;

    if (profile->u_parent != NULL) {
        riscv_cpu_set_profile(cpu, profile->u_parent, enabled);
    }

    if (profile->s_parent != NULL) {
        riscv_cpu_set_profile(cpu, profile->s_parent, enabled);
    }

    profile->enabled = enabled;

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;

#ifndef CONFIG_USER_ONLY
        if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
            object_property_set_bool(OBJECT(cpu), "mmu", true, NULL);
            const char *satp_prop = satp_mode_str(profile->satp_mode,
                                                  riscv_cpu_is_32bit(cpu));
            object_property_set_bool(OBJECT(cpu), satp_prop, true, NULL);
        }
#endif
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;

    riscv_cpu_set_profile(cpu, profile, value);
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            riscv_cpu_set_profile(RISCV_CPU(cpu_obj), profile, true);
        }
    }
}

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu) {
        return;
    }

    /*
     * Set the default value directly instead of using
     * object_property_set_bool() so that the user-options hash filled by
     * the set() callback only records explicit user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those; the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    } else {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_svrsw60t59b), false);
    }

    /*
     * TODO: ext_smrnmi requires OpenSBI changes that our current
     * image does not have. Disable it for now.
     */
    if (cpu->cfg.ext_smrnmi) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
    }

    /*
     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
     * to avoid generating a double trap. OpenSBI does not currently support it,
     * disable it for now.
     */
    if (cpu->cfg.ext_smdbltrp) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);