xref: /qemu/cpu-target.c (revision 1405d7e60d8c98a28b29885f70da4f2e4407fbc6)
1 /*
2  * Target-specific parts of the CPU object
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qapi/error.h"
22 #include "qemu/error-report.h"
23 #include "qemu/qemu-print.h"
24 #include "migration/vmstate.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "hw/core/sysemu-cpu-ops.h"
27 #endif
28 #include "system/accel-ops.h"
29 #include "system/cpus.h"
30 #include "system/tcg.h"
31 #include "exec/tswap.h"
32 #include "exec/replay-core.h"
33 #include "exec/cpu-common.h"
34 #include "exec/cputlb.h"
35 #include "exec/exec-all.h"
36 #include "exec/tb-flush.h"
37 #include "exec/log.h"
38 #include "accel/accel-cpu-target.h"
39 #include "trace/trace-root.h"
40 #include "qemu/accel.h"
41 #include "hw/core/cpu.h"
42 
43 #ifndef CONFIG_USER_ONLY
44 static int cpu_common_post_load(void *opaque, int version_id)
45 {
46     if (tcg_enabled()) {
47         CPUState *cpu = opaque;
48 
49         /*
50          * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
51          * version_id is increased.
52          */
53         cpu->interrupt_request &= ~0x01;
54 
55         tlb_flush(cpu);
56 
57         /*
58          * loadvm has just updated the content of RAM, bypassing the
59          * usual mechanisms that ensure we flush TBs for writes to
60          * memory we've translated code from. So we must flush all TBs,
61          * which will now be stale.
62          */
63         tb_flush(cpu);
64     }
65 
66     return 0;
67 }
68 
69 static int cpu_common_pre_load(void *opaque)
70 {
71     CPUState *cpu = opaque;
72 
73     cpu->exception_index = -1;
74 
75     return 0;
76 }
77 
78 static bool cpu_common_exception_index_needed(void *opaque)
79 {
80     CPUState *cpu = opaque;
81 
82     return tcg_enabled() && cpu->exception_index != -1;
83 }
84 
/*
 * Optional migration subsection carrying cpu->exception_index; only
 * emitted when cpu_common_exception_index_needed() returns true.
 */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
95 
96 static bool cpu_common_crash_occurred_needed(void *opaque)
97 {
98     CPUState *cpu = opaque;
99 
100     return cpu->crash_occurred;
101 }
102 
/*
 * Optional migration subsection carrying cpu->crash_occurred; only
 * emitted when cpu_common_crash_occurred_needed() returns true.
 */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
113 
/*
 * Migration description for the architecture-independent CPU state
 * (halted flag and pending interrupt mask), plus optional subsections
 * for the exception index and the crash flag.  Registered per-CPU by
 * cpu_exec_realizefn() when the device provides no vmsd of its own.
 */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
131 #endif
132 
/*
 * Target-specific realize step for a CPU: let the accelerator set the
 * vCPU up, publish it on the global CPU list, and (system mode only)
 * register its state for migration.  Returns false and sets *errp on
 * accelerator failure.
 */
bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    if (!accel_cpu_common_realize(cpu, errp)) {
        return false;
    }

    /* Wait until cpu initialization complete before exposing cpu. */
    cpu_list_add(cpu);

#ifdef CONFIG_USER_ONLY
    /*
     * User-mode emulation has no migration, so a device-level vmsd, if
     * present at all, must be marked unmigratable.
     */
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    /* Register the common CPU state unless the device brings its own vmsd. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    /* Some targets additionally carry a legacy, target-specific vmsd. */
    if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */

    return true;
}
156 
157 void cpu_exec_unrealizefn(CPUState *cpu)
158 {
159 #ifndef CONFIG_USER_ONLY
160     CPUClass *cc = CPU_GET_CLASS(cpu);
161 
162     if (cc->sysemu_ops->legacy_vmsd != NULL) {
163         vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
164     }
165     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
166         vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
167     }
168 #endif
169 
170     cpu_list_remove(cpu);
171     /*
172      * Now that the vCPU has been removed from the RCU list, we can call
173      * accel_cpu_common_unrealize, which may free fields using call_rcu.
174      */
175     accel_cpu_common_unrealize(cpu);
176 }
177 
178 char *cpu_model_from_type(const char *typename)
179 {
180     const char *suffix = "-" CPU_RESOLVING_TYPE;
181 
182     if (!object_class_by_name(typename)) {
183         return NULL;
184     }
185 
186     if (g_str_has_suffix(typename, suffix)) {
187         return g_strndup(typename, strlen(typename) - strlen(suffix));
188     }
189 
190     return g_strdup(typename);
191 }
192 
193 const char *parse_cpu_option(const char *cpu_option)
194 {
195     ObjectClass *oc;
196     CPUClass *cc;
197     gchar **model_pieces;
198     const char *cpu_type;
199 
200     model_pieces = g_strsplit(cpu_option, ",", 2);
201     if (!model_pieces[0]) {
202         error_report("-cpu option cannot be empty");
203         exit(1);
204     }
205 
206     oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
207     if (oc == NULL) {
208         error_report("unable to find CPU model '%s'", model_pieces[0]);
209         g_strfreev(model_pieces);
210         exit(EXIT_FAILURE);
211     }
212 
213     cpu_type = object_class_get_name(oc);
214     cc = CPU_CLASS(oc);
215     cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
216     g_strfreev(model_pieces);
217     return cpu_type;
218 }
219 
220 #ifndef cpu_list
221 static void cpu_list_entry(gpointer data, gpointer user_data)
222 {
223     CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data));
224     const char *typename = object_class_get_name(OBJECT_CLASS(data));
225     g_autofree char *model = cpu_model_from_type(typename);
226 
227     if (cc->deprecation_note) {
228         qemu_printf("  %s (deprecated)\n", model);
229     } else {
230         qemu_printf("  %s\n", model);
231     }
232 }
233 
234 static void cpu_list(void)
235 {
236     GSList *list;
237 
238     list = object_class_get_list_sorted(TYPE_CPU, false);
239     qemu_printf("Available CPUs:\n");
240     g_slist_foreach(list, cpu_list_entry, NULL);
241     g_slist_free(list);
242 }
243 #endif
244 
/*
 * Print the available CPU models.  cpu_list is either the generic
 * helper above or a target-provided override (see the #ifndef cpu_list
 * guard).
 */
void list_cpus(void)
{
    cpu_list();
}
249 
250 /* enable or disable single step mode. EXCP_DEBUG is returned by the
251    CPU loop after each instruction */
252 void cpu_single_step(CPUState *cpu, int enabled)
253 {
254     if (cpu->singlestep_enabled != enabled) {
255         cpu->singlestep_enabled = enabled;
256 
257 #if !defined(CONFIG_USER_ONLY)
258         const AccelOpsClass *ops = cpus_get_accel();
259         if (ops->update_guest_debug) {
260             ops->update_guest_debug(cpu);
261         }
262 #endif
263 
264         trace_breakpoint_singlestep(cpu->cpu_index, enabled);
265     }
266 }
267 
/*
 * Report a fatal emulation error and abort.  The formatted message and
 * a CPU state dump go to stderr, and — when a separate log file is
 * active — to the QEMU log as well.  Never returns.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr vfprintf; ap2 is kept for the log file. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "qemu: fatal: ");
            vfprintf(logfile, fmt, ap2);
            fprintf(logfile, "\n");
            cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
            qemu_log_unlock(logfile);
        }
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /*
         * Restore the default SIGABRT disposition so the abort() below
         * is not intercepted by a guest-installed handler.
         */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
303 
/* Return true when the compiled-in target is big-endian (TARGET_BIG_ENDIAN). */
bool target_words_bigendian(void)
{
    return TARGET_BIG_ENDIAN;
}
308 
/* Return the compile-time target name string (the TARGET_NAME macro). */
const char *target_name(void)
{
    return TARGET_NAME;
}
313