xref: /qemu/target/i386/tcg/tcg-cpu.c (revision cc944932ecef3b7a56ae62d89dd92fb9e56c5cc8)
1 /*
2  * i386 TCG cpu class initialization
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "helper-tcg.h"
23 #include "qemu/accel.h"
24 #include "accel/accel-cpu-target.h"
25 #include "exec/translation-block.h"
26 
27 #include "tcg-cpu.h"
28 
29 /* Frob eflags into and out of the CPU temporary format.  */
30 
31 static void x86_cpu_exec_enter(CPUState *cs)
32 {
33     X86CPU *cpu = X86_CPU(cs);
34     CPUX86State *env = &cpu->env;
35 
36     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
37     env->df = 1 - (2 * ((env->eflags >> 10) & 1));
38     CC_OP = CC_OP_EFLAGS;
39     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
40 }
41 
42 static void x86_cpu_exec_exit(CPUState *cs)
43 {
44     X86CPU *cpu = X86_CPU(cs);
45     CPUX86State *env = &cpu->env;
46 
47     env->eflags = cpu_compute_eflags(env);
48 }
49 
50 static void x86_cpu_synchronize_from_tb(CPUState *cs,
51                                         const TranslationBlock *tb)
52 {
53     /* The instruction pointer is always up to date with CF_PCREL. */
54     if (!(tb_cflags(tb) & CF_PCREL)) {
55         CPUX86State *env = cpu_env(cs);
56 
57         if (tb->flags & HF_CS64_MASK) {
58             env->eip = tb->pc;
59         } else {
60             env->eip = (uint32_t)(tb->pc - tb->cs_base);
61         }
62     }
63 }
64 
65 static void x86_restore_state_to_opc(CPUState *cs,
66                                      const TranslationBlock *tb,
67                                      const uint64_t *data)
68 {
69     X86CPU *cpu = X86_CPU(cs);
70     CPUX86State *env = &cpu->env;
71     int cc_op = data[1];
72     uint64_t new_pc;
73 
74     if (tb_cflags(tb) & CF_PCREL) {
75         /*
76          * data[0] in PC-relative TBs is also a linear address, i.e. an address with
77          * the CS base added, because it is not guaranteed that EIP bits 12 and higher
78          * stay the same across the translation block.  Add the CS base back before
79          * replacing the low bits, and subtract it below just like for !CF_PCREL.
80          */
81         uint64_t pc = env->eip + tb->cs_base;
82         new_pc = (pc & TARGET_PAGE_MASK) | data[0];
83     } else {
84         new_pc = data[0];
85     }
86     if (tb->flags & HF_CS64_MASK) {
87         env->eip = new_pc;
88     } else {
89         env->eip = (uint32_t)(new_pc - tb->cs_base);
90     }
91 
92     if (cc_op != CC_OP_DYNAMIC) {
93         env->cc_op = cc_op;
94     }
95 }
96 
97 int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
98 {
99     int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
100     int mmu_index_base =
101         pl == 3 ? MMU_USER64_IDX :
102         !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
103         (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
104 
105     return mmu_index_base + mmu_index_32;
106 }
107 
108 static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
109 {
110     CPUX86State *env = cpu_env(cs);
111     return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
112 }
113 
114 #ifndef CONFIG_USER_ONLY
115 static bool x86_debug_check_breakpoint(CPUState *cs)
116 {
117     X86CPU *cpu = X86_CPU(cs);
118     CPUX86State *env = &cpu->env;
119 
120     /* RF disables all architectural breakpoints. */
121     return !(env->eflags & RF_MASK);
122 }
123 #endif
124 
125 #include "accel/tcg/cpu-ops.h"
126 
/* TCG hooks shared by every x86 CPU model (see x86_tcg_cpu_init_ops). */
static const TCGCPUOps x86_tcg_ops = {
    /* Translation and state synchronization. */
    .initialize = tcg_x86_init,
    .translate_code = x86_translate_code,
    .synchronize_from_tb = x86_cpu_synchronize_from_tb,
    .restore_state_to_opc = x86_restore_state_to_opc,
    .mmu_index = x86_cpu_mmu_index,
    /* Flags move between eflags and the CC_* temporaries on entry/exit. */
    .cpu_exec_enter = x86_cpu_exec_enter,
    .cpu_exec_exit = x86_cpu_exec_exit,
#ifdef CONFIG_USER_ONLY
    /* user-mode emulation: faults surface to the guest as signals */
    .fake_user_interrupt = x86_cpu_do_interrupt,
    .record_sigsegv = x86_cpu_record_sigsegv,
    .record_sigbus = x86_cpu_record_sigbus,
#else
    /* system emulation: full MMU, interrupt, halt and debug handling */
    .tlb_fill = x86_cpu_tlb_fill,
    .do_interrupt = x86_cpu_do_interrupt,
    .cpu_exec_halt = x86_cpu_exec_halt,
    .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    .do_unaligned_access = x86_cpu_do_unaligned_access,
    .debug_excp_handler = breakpoint_handler,
    .debug_check_breakpoint = x86_debug_check_breakpoint,
    .need_replay_interrupt = x86_need_replay_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
150 
/* AccelCPUClass::cpu_class_init hook installed by x86_tcg_cpu_class_init. */
static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /* for x86, all cpus use the same set of operations */
    cc->tcg_ops = &x86_tcg_ops;
}
156 
/* Hook the per-CPU-class accel initializer into the CPU class. */
static void x86_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = x86_tcg_cpu_init_ops;
}
161 
162 static void x86_tcg_cpu_xsave_init(void)
163 {
164 #define XO(bit, field) \
165     x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
166 
167     XO(XSTATE_FP_BIT, legacy);
168     XO(XSTATE_SSE_BIT, legacy);
169     XO(XSTATE_YMM_BIT, avx_state);
170     XO(XSTATE_BNDREGS_BIT, bndreg_state);
171     XO(XSTATE_BNDCSR_BIT, bndcsr_state);
172     XO(XSTATE_OPMASK_BIT, opmask_state);
173     XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
174     XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
175     XO(XSTATE_PKRU_BIT, pkru_state);
176 
177 #undef XO
178 }
179 
180 /*
181  * TCG-specific defaults that override cpudef models when using TCG.
182  * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
183  */
static PropValue x86_tcg_default_props[] = {
    /* vme disabled by default under TCG — presumably not emulated; confirm */
    { "vme", "off" },
    { NULL, NULL },  /* terminator */
};
188 
189 static void x86_tcg_cpu_instance_init(CPUState *cs)
190 {
191     X86CPU *cpu = X86_CPU(cs);
192     X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
193 
194     if (xcc->model) {
195         /* Special cases not set in the X86CPUDefinition structs: */
196         x86_cpu_apply_props(cpu, x86_tcg_default_props);
197     }
198 
199     x86_tcg_cpu_xsave_init();
200 }
201 
/* Class init for the TCG AccelCPU class: wire up the x86 hooks. */
static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

#ifndef CONFIG_USER_ONLY
    /* Realize hook only exists for system emulation. */
    acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* !CONFIG_USER_ONLY */

    acc->cpu_class_init = x86_tcg_cpu_class_init;
    acc->cpu_instance_init = x86_tcg_cpu_instance_init;
}
/* QOM type for the TCG accel CPU class; abstract, never instantiated. */
static const TypeInfo x86_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = x86_tcg_cpu_accel_class_init,
    .abstract = true,
};
/* Register the TCG accel CPU type with QOM at module-init time. */
static void x86_tcg_cpu_accel_register_types(void)
{
    type_register_static(&x86_tcg_cpu_accel_type_info);
}
type_init(x86_tcg_cpu_accel_register_types);
225