/*
 * Target-specific parts of the CPU object
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "translate-all.h"
#include "exec/log.h"

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

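/*
 * Reset exception_index before an incoming migration stream is applied:
 * the value is only transferred in the "exception_index" subsection, so a
 * stream that lacks it must leave the CPU with no exception pending.
 */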
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

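/*
 * Undo what cpu_exec_realizefn() set up: drop the TLB, remove the CPU from
 * the global list and, in system-mode builds, unregister its vmstate and
 * free the IOMMU notifier list.
 */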
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    tlb_destroy(cpu);
    cpu_list_remove(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else
    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
    tcg_iommu_free_notifier_list(cpu);
#endif
}

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in hw/core/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
    DEFINE_PROP_END_OF_LIST(),
};

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    static bool tcg_target_initialized;

    cpu_list_add(cpu);

    if (tcg_enabled() && !tcg_target_initialized) {
        tcg_target_initialized = true;
        cc->tcg_initialize();
    }
    tlb_init(cpu);

    qemu_plugin_vcpu_init_hook(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else /* !CONFIG_USER_ONLY */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }

    tcg_iommu_init_notifier_list(cpu);
#endif
}

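/*
 * Parse a -cpu command line argument of the form "model[,feature,...]":
 * resolve the model name to a QOM CPU type and hand the feature list to the
 * class' parse_features() hook.  Unknown models and bad features are fatal.
 */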
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(pc);
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /*
     * There may not be a virtual to physical translation for the pc
     * right now, but there may exist cached TB for this pc.
     * Flush the whole TB cache to force re-translation of such TBs.
     * This is heavyweight, but we're debugging anyway.
     */
    tb_flush(cpu);
}
#endif

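/*
 * Guest breakpoints live on cpu->breakpoints; whenever the list changes,
 * breakpoint_invalidate() discards cached translations for the affected pc
 * so that the new breakpoint state is taken into account on the next
 * translation.
 */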
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

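/*
 * Print a fatal error message and the CPU state to stderr (and to the log
 * file when one is open), finish any record/replay session, then abort().
 * In user-mode builds the default SIGABRT handler is restored first so
 * that abort() really terminates the process.
 */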
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock(logfile);
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write)
{
    int flags;
    target_ulong l, page;
    void *p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

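/*
 * Derive qemu_host_page_size and qemu_host_page_mask from the real host
 * page size, clamped so that a host page is never smaller than a target
 * page.
 */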
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}