1 /*
2 * QEMU CPU model (system specific)
3 *
4 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "system/address-spaces.h"
24 #include "exec/cputlb.h"
25 #include "system/memory.h"
26 #include "exec/tb-flush.h"
27 #include "exec/tswap.h"
28 #include "hw/qdev-core.h"
29 #include "hw/qdev-properties.h"
30 #include "hw/core/sysemu-cpu-ops.h"
31 #include "migration/vmstate.h"
32 #include "system/tcg.h"
33
cpu_has_work(CPUState * cpu)34 bool cpu_has_work(CPUState *cpu)
35 {
36 return cpu->cc->sysemu_ops->has_work(cpu);
37 }
38
cpu_paging_enabled(const CPUState * cpu)39 bool cpu_paging_enabled(const CPUState *cpu)
40 {
41 if (cpu->cc->sysemu_ops->get_paging_enabled) {
42 return cpu->cc->sysemu_ops->get_paging_enabled(cpu);
43 }
44
45 return false;
46 }
47
cpu_get_memory_mapping(CPUState * cpu,MemoryMappingList * list,Error ** errp)48 bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
49 Error **errp)
50 {
51 if (cpu->cc->sysemu_ops->get_memory_mapping) {
52 return cpu->cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
53 }
54
55 error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
56 return false;
57 }
58
cpu_get_phys_page_attrs_debug(CPUState * cpu,vaddr addr,MemTxAttrs * attrs)59 hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
60 MemTxAttrs *attrs)
61 {
62 hwaddr paddr;
63
64 if (cpu->cc->sysemu_ops->get_phys_page_attrs_debug) {
65 paddr = cpu->cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr,
66 attrs);
67 } else {
68 /* Fallback for CPUs which don't implement the _attrs_ hook */
69 *attrs = MEMTXATTRS_UNSPECIFIED;
70 paddr = cpu->cc->sysemu_ops->get_phys_page_debug(cpu, addr);
71 }
72 /* Indicate that this is a debug access. */
73 attrs->debug = 1;
74 return paddr;
75 }
76
cpu_get_phys_page_debug(CPUState * cpu,vaddr addr)77 hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
78 {
79 MemTxAttrs attrs = {};
80
81 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
82 }
83
/*
 * Map transaction attributes to one of @cpu's address space indexes.
 * CPUs without the asidx_from_attrs hook use address space 0.
 */
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    const SysemuCPUOps *ops = cpu->cc->sysemu_ops;
    int asidx;

    if (!ops->asidx_from_attrs) {
        return 0;
    }

    asidx = ops->asidx_from_attrs(cpu, attrs);
    /* The hook must return an index within the CPU's address spaces. */
    assert(asidx >= 0 && asidx < cpu->num_ases);
    return asidx;
}
94
/*
 * Emit the QEMU-specific ELF32 core dump note for @cpu, if the target
 * implements one.  A missing hook is not an error: there is simply
 * nothing to write, so 0 is returned.
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    const SysemuCPUOps *ops = cpu->cc->sysemu_ops;

    return ops->write_elf32_qemunote
           ? ops->write_elf32_qemunote(f, cpu, opaque)
           : 0;
}
103
/*
 * Emit the per-CPU ELF32 core dump note for @cpu.
 * Unlike the qemunote variant, a target without this hook cannot
 * produce a valid dump, so -1 is returned.
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    const SysemuCPUOps *ops = cpu->cc->sysemu_ops;

    return ops->write_elf32_note
           ? ops->write_elf32_note(f, cpu, cpuid, opaque)
           : -1;
}
112
/*
 * Emit the QEMU-specific ELF64 core dump note for @cpu, if the target
 * implements one.  A missing hook is not an error: there is simply
 * nothing to write, so 0 is returned.
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    const SysemuCPUOps *ops = cpu->cc->sysemu_ops;

    return ops->write_elf64_qemunote
           ? ops->write_elf64_qemunote(f, cpu, opaque)
           : 0;
}
121
/*
 * Emit the per-CPU ELF64 core dump note for @cpu.
 * Unlike the qemunote variant, a target without this hook cannot
 * produce a valid dump, so -1 is returned.
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    const SysemuCPUOps *ops = cpu->cc->sysemu_ops;

    return ops->write_elf64_note
           ? ops->write_elf64_note(f, cpu, cpuid, opaque)
           : -1;
}
130
cpu_virtio_is_big_endian(CPUState * cpu)131 bool cpu_virtio_is_big_endian(CPUState *cpu)
132 {
133 if (cpu->cc->sysemu_ops->virtio_is_big_endian) {
134 return cpu->cc->sysemu_ops->virtio_is_big_endian(cpu);
135 }
136 return target_big_endian();
137 }
138
cpu_get_crash_info(CPUState * cpu)139 GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
140 {
141 GuestPanicInformation *res = NULL;
142
143 if (cpu->cc->sysemu_ops->get_crash_info) {
144 res = cpu->cc->sysemu_ops->get_crash_info(cpu);
145 }
146 return res;
147 }
148
/* qdev properties common to all system-mode CPU objects. */
static const Property cpu_system_props[] = {
    /*
     * Create a memory property for system CPU object, so users can
     * wire up its memory. The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
};
158
cpu_get_start_powered_off(Object * obj,Error ** errp)159 static bool cpu_get_start_powered_off(Object *obj, Error **errp)
160 {
161 CPUState *cpu = CPU(obj);
162 return cpu->start_powered_off;
163 }
164
cpu_set_start_powered_off(Object * obj,bool value,Error ** errp)165 static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
166 {
167 CPUState *cpu = CPU(obj);
168 cpu->start_powered_off = value;
169 }
170
/*
 * Register the properties shared by all system-mode CPU classes on @dc.
 */
void cpu_class_init_props(DeviceClass *dc)
{
    ObjectClass *oc = OBJECT_CLASS(dc);

    /*
     * "start-powered-off" is registered by hand instead of through the
     * Property array because it needs to remain settable after realize.
     */
    object_class_property_add_bool(oc, "start-powered-off",
                                   cpu_get_start_powered_off,
                                   cpu_set_start_powered_off);

    device_class_set_props(dc, cpu_system_props);
}
185
/*
 * Validate @cc once its class init has run.
 * has_work is the only SysemuCPUOps handler every target must provide;
 * the remaining hooks are optional and checked at each call site.
 */
void cpu_exec_class_post_init(CPUClass *cc)
{
    g_assert(cc->sysemu_ops->has_work);
}
191
/*
 * Instance-init time setup for a system-mode CPU: point its memory at
 * the global system memory region (the "memory" link property may be
 * used to override this) and take a reference on it.
 */
void cpu_exec_initfn(CPUState *cpu)
{
    MemoryRegion *sysmem = get_system_memory();

    object_ref(OBJECT(sysmem));
    cpu->memory = sysmem;
}
197
/*
 * Post-load hook for vmstate_cpu_common: sanitize the incoming
 * interrupt state and invalidate TCG caches that the incoming RAM
 * contents have made stale.  Always returns 0 (success).
 */
static int cpu_common_post_load(void *opaque, int version_id)
{
    if (tcg_enabled()) {
        CPUState *cpu = opaque;

        /*
         * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
         * version_id is increased.
         */
        cpu->interrupt_request &= ~0x01;

        /* Cached guest translations predate the incoming RAM contents. */
        tlb_flush(cpu);

        /*
         * loadvm has just updated the content of RAM, bypassing the
         * usual mechanisms that ensure we flush TBs for writes to
         * memory we've translated code from. So we must flush all TBs,
         * which will now be stale.
         */
        tb_flush(cpu);
    }

    return 0;
}
222
cpu_common_pre_load(void * opaque)223 static int cpu_common_pre_load(void *opaque)
224 {
225 CPUState *cpu = opaque;
226
227 cpu->exception_index = -1;
228
229 return 0;
230 }
231
cpu_common_exception_index_needed(void * opaque)232 static bool cpu_common_exception_index_needed(void *opaque)
233 {
234 CPUState *cpu = opaque;
235
236 return tcg_enabled() && cpu->exception_index != -1;
237 }
238
/* Optional subsection carrying a pending TCG exception across migration. */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
249
cpu_common_crash_occurred_needed(void * opaque)250 static bool cpu_common_crash_occurred_needed(void *opaque)
251 {
252 CPUState *cpu = opaque;
253
254 return cpu->crash_occurred;
255 }
256
/* Optional subsection preserving the guest-crash flag across migration. */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
267
/*
 * Migration state common to every CPU.  Field order and types are wire
 * format: do not reorder or retype without bumping version_id.
 */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
285
/*
 * Register @cpu's migration state: the common description (unless the
 * device class already supplies a vmsd) plus any target legacy vmsd.
 * Mirrored by cpu_vmstate_unregister().
 */
void cpu_vmstate_register(CPUState *cpu)
{
    const VMStateDescription *legacy = cpu->cc->sysemu_ops->legacy_vmsd;

    if (!qdev_get_vmsd(DEVICE(cpu))) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (legacy) {
        vmstate_register(NULL, cpu->cpu_index, legacy, cpu);
    }
}
296
/*
 * Undo cpu_vmstate_register(), tearing the registrations down in
 * reverse order.
 */
void cpu_vmstate_unregister(CPUState *cpu)
{
    const VMStateDescription *legacy = cpu->cc->sysemu_ops->legacy_vmsd;

    if (legacy) {
        vmstate_unregister(NULL, legacy, cpu);
    }
    if (!qdev_get_vmsd(DEVICE(cpu))) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
306