/*
 * i386 TCG cpu class initialization
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper-tcg.h"
#include "qemu/accel.h"
#include "accel/accel-cpu-target.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg-cpu.h"

/* Frob eflags into and out of the CPU temporary format.  */

static void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
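    /* EFLAGS bit 10 is DF; store it internally as +1 (clear) or -1 (set). */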
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

static void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

static TCGTBCPUState x86_get_tb_cpu_state(CPUState *cs)
{
    CPUX86State *env = cpu_env(cs);
    uint32_t flags, cs_base;
    vaddr pc;

    flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
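    /* In 64-bit code the CS base is treated as zero, so the PC is EIP itself. */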
    if (env->hflags & HF_CS64_MASK) {
        cs_base = 0;
        pc = env->eip;
    } else {
        cs_base = env->segs[R_CS].base;
        pc = (uint32_t)(cs_base + env->eip);
    }

    return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}

static void x86_cpu_synchronize_from_tb(CPUState *cs,
                                        const TranslationBlock *tb)
{
    /* The instruction pointer is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUX86State *env = cpu_env(cs);

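        /*
         * tb->pc is a linear address; outside 64-bit code, subtract
         * the CS base to recover EIP.
         */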
        if (tb->flags & HF_CS64_MASK) {
            env->eip = tb->pc;
        } else {
            env->eip = (uint32_t)(tb->pc - tb->cs_base);
        }
    }
}

static void x86_restore_state_to_opc(CPUState *cs,
                                     const TranslationBlock *tb,
                                     const uint64_t *data)
{
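    /*
     * data[0] holds the PC (for CF_PCREL, only its in-page bits) and
     * data[1] the CC_OP value recorded at translation time.
     */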
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int cc_op = data[1];
    uint64_t new_pc;

    if (tb_cflags(tb) & CF_PCREL) {
        /*
         * data[0] in PC-relative TBs is also a linear address, i.e. an address with
         * the CS base added, because it is not guaranteed that EIP bits 12 and higher
         * stay the same across the translation block.  Add the CS base back before
         * replacing the low bits, and subtract it below just like for !CF_PCREL.
         */
        uint64_t pc = env->eip + tb->cs_base;
        new_pc = (pc & TARGET_PAGE_MASK) | data[0];
    } else {
        new_pc = data[0];
    }
    if (tb->flags & HF_CS64_MASK) {
        env->eip = new_pc;
    } else {
        env->eip = (uint32_t)(new_pc - tb->cs_base);
    }

    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}

int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
{
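    /*
     * The MMU index encodes the privilege level and whether SMAP checks
     * apply; each 64-bit base index is followed by its 32-bit variant.
     */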
    int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
    int mmu_index_base =
        pl == 3 ? MMU_USER64_IDX :
        !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
        (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;

    return mmu_index_base + mmu_index_32;
}

static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPUX86State *env = cpu_env(cs);
    return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
}

#ifndef CONFIG_USER_ONLY
static bool x86_debug_check_breakpoint(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* RF disables all architectural breakpoints. */
    return !(env->eflags & RF_MASK);
}

static void x86_cpu_exec_reset(CPUState *cs)
{
    CPUArchState *env = cpu_env(cs);

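    /* Under SVM, the hypervisor may intercept INIT before the CPU resets. */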
    cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
    do_cpu_init(env_archcpu(env));
    cs->exception_index = EXCP_HALTED;
}

static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx,
                              vaddr result, vaddr base)
{
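    /* Outside 64-bit code, linear addresses wrap around at 32 bits. */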
    return cpu_env(cs)->hflags & HF_CS64_MASK ? result : (uint32_t)result;
}
#endif

const TCGCPUOps x86_tcg_ops = {
    .mttcg_supported = true,
    .precise_smc = true,
    /*
     * The x86 has a strong memory model with some store-after-load re-ordering
     */
    .guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD,
    .initialize = tcg_x86_init,
    .translate_code = x86_translate_code,
    .get_tb_cpu_state = x86_get_tb_cpu_state,
    .synchronize_from_tb = x86_cpu_synchronize_from_tb,
    .restore_state_to_opc = x86_restore_state_to_opc,
    .mmu_index = x86_cpu_mmu_index,
    .cpu_exec_enter = x86_cpu_exec_enter,
    .cpu_exec_exit = x86_cpu_exec_exit,
#ifdef CONFIG_USER_ONLY
    .fake_user_interrupt = x86_cpu_do_interrupt,
    .record_sigsegv = x86_cpu_record_sigsegv,
    .record_sigbus = x86_cpu_record_sigbus,
#else
    .tlb_fill = x86_cpu_tlb_fill,
    .pointer_wrap = x86_pointer_wrap,
    .do_interrupt = x86_cpu_do_interrupt,
    .cpu_exec_halt = x86_cpu_exec_halt,
    .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    .cpu_exec_reset = x86_cpu_exec_reset,
    .do_unaligned_access = x86_cpu_do_unaligned_access,
    .debug_excp_handler = breakpoint_handler,
    .debug_check_breakpoint = x86_debug_check_breakpoint,
    .need_replay_interrupt = x86_need_replay_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

static void x86_tcg_cpu_xsave_init(void)
{
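    /* Under TCG, each xsave component lives at its fixed X86XSaveArea offset. */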
#define XO(bit, field) \
    x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);

    XO(XSTATE_FP_BIT, legacy);
    XO(XSTATE_SSE_BIT, legacy);
    XO(XSTATE_YMM_BIT, avx_state);
    XO(XSTATE_BNDREGS_BIT, bndreg_state);
    XO(XSTATE_BNDCSR_BIT, bndcsr_state);
    XO(XSTATE_OPMASK_BIT, opmask_state);
    XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
    XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
    XO(XSTATE_PKRU_BIT, pkru_state);

#undef XO
}

/*
 * TCG-specific defaults that override cpudef models when using TCG.
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
static PropValue x86_tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};

static void x86_tcg_cpu_instance_init(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);

    if (xcc->model) {
        /* Special cases not set in the X86CPUDefinition structs: */
        x86_cpu_apply_props(cpu, x86_tcg_default_props);
    }

    x86_tcg_cpu_xsave_init();
}

static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

#ifndef CONFIG_USER_ONLY
    acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* !CONFIG_USER_ONLY */

    acc->cpu_instance_init = x86_tcg_cpu_instance_init;
}

static const TypeInfo x86_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),
    .parent = TYPE_ACCEL_CPU,
    .class_init = x86_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void x86_tcg_cpu_accel_register_types(void)
{
    type_register_static(&x86_tcg_cpu_accel_type_info);
}
type_init(x86_tcg_cpu_accel_register_types);