/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "system/hw_accel.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/lockcnt.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/target-info.h"
#include "exec/log.h"
#include "exec/gdbstub.h"
#include "system/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin.h"
#endif

CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    if (!qdev_realize(DEVICE(cpu), NULL, &err)) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to. cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !bql_locked();

    if (need_lock) {
        bql_lock();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        bql_unlock();
    }
}

void cpu_exit(CPUState *cpu)
{
    qatomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    if (cpu->cc->dump_state) {
        cpu_synchronize_state(cpu);
        cpu->cc->dump_state(cpu, f, flags);
    }
}

void cpu_reset(CPUState *cpu)
{
    device_cold_reset(DEVICE(cpu));

    trace_cpu_reset(cpu->cpu_index);
}

static void cpu_common_reset_hold(Object *obj, ResetType type)
{
    CPUState *cpu = CPU(obj);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cpu->cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = cpu->start_powered_off;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    qatomic_set(&cpu->neg.icount_decr.u32, 0);
    cpu->neg.can_do_io = true;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    cpu_exec_reset_hold(cpu);
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    ObjectClass *oc;
    CPUClass *cc;

    oc = object_class_by_name(typename);
    cc = CPU_CLASS(oc);
    assert(cc->class_by_name);
    assert(cpu_model);
    oc = cc->class_by_name(cpu_model);
    if (object_class_dynamic_cast(oc, typename) &&
        !object_class_is_abstract(oc)) {
        return oc;
    }

    return NULL;
}

char *cpu_model_from_type(const char *typename)
{
    g_autofree char *suffix = g_strdup_printf("-%s", target_cpu_type());

    if (!object_class_by_name(typename)) {
        return NULL;
    }

    if (g_str_has_suffix(typename, suffix)) {
        return g_strndup(typename, strlen(typename) - strlen(suffix));
    }

    return g_strdup(typename);
}

static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(target_cpu_type(), model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    if (!accel_cpu_common_realize(cpu, errp)) {
        return false;
    }

    gdb_init_cpu(cpu);

    /* Wait until cpu initialization is complete before exposing the cpu. */
    cpu_list_add(cpu);

    cpu_vmstate_register(cpu);

    return true;
}

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        MachineClass *mc = MACHINE_GET_CLASS(machine);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
}

static void cpu_common_unrealizefn(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);

    /* Call the plugin hook before the cpu is fully unrealized */
#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        qemu_plugin_vcpu_exit_hook(cpu);
    }
#endif

    /* NOTE: latest generic point before the cpu is fully unrealized */
    cpu_exec_unrealizefn(cpu);
}

void cpu_exec_unrealizefn(CPUState *cpu)
{
    cpu_vmstate_unregister(cpu);

    cpu_list_remove(cpu);
    /*
     * Now that the vCPU has been removed from the RCU list, we can call
     * accel_cpu_common_unrealize, which may free fields using call_rcu.
     */
    accel_cpu_common_unrealize(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);

    cpu_exec_class_post_init(CPU_GET_CLASS(obj));

    /* cache the cpu class for the hot path */
    cpu->cc = CPU_GET_CLASS(cpu);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    cpu->as = NULL;
    cpu->num_ases = 0;
    /* user-mode doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for system-mode */
    cpu->nr_threads = 1;

    /* allocate storage for thread info, initialise condition variables */
    cpu->thread = g_new0(QemuThread, 1);
    cpu->halt_cond = g_new0(QemuCond, 1);
    qemu_cond_init(cpu->halt_cond);

    qemu_mutex_init(&cpu->work_mutex);
    qemu_lockcnt_init(&cpu->in_ioctl_lock);
    QSIMPLEQ_INIT(&cpu->work_list);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);

    /*
     * Plugin initialization must wait until the cpu starts executing
     * code, but we must queue this work before the threads are
     * created to ensure we don't race.
     */
#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        cpu->plugin_state = qemu_plugin_create_vcpu_state();
        qemu_plugin_vcpu_init_hook(cpu);
    }
#endif
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        g_free(cpu->plugin_state);
    }
#endif
    free_queued_cpu_work(cpu);
    /* If cleanup didn't already happen via gdb_unregister_coprocessor_all() */
    if (cpu->gdb_regs) {
        g_array_free(cpu->gdb_regs, TRUE);
    }
    qemu_lockcnt_destroy(&cpu->in_ioctl_lock);
    qemu_mutex_destroy(&cpu->work_mutex);
    qemu_cond_destroy(cpu->halt_cond);
    g_free(cpu->halt_cond);
    g_free(cpu->thread);
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static void cpu_common_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->get_arch_id = cpu_common_get_arch_id;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    rc->phases.hold = cpu_common_reset_hold;
    cpu_class_init_props(dc);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_common_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)

static void cpu_list_entry(gpointer data, gpointer user_data)
{
    CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data));
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    g_autofree char *model = cpu_model_from_type(typename);

    if (cc->deprecation_note) {
        qemu_printf("  %s (deprecated)\n", model);
    } else {
        qemu_printf("  %s\n", model);
    }
}

void list_cpus(void)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type()));

    if (cc->list_cpus) {
        cc->list_cpus();
    } else {
        GSList *list;

        list = object_class_get_list_sorted(TYPE_CPU, false);
        qemu_printf("Available CPUs:\n");
        g_slist_foreach(list, cpu_list_entry, NULL);
        g_slist_free(list);
    }
}