/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/dis-asm.h"
#include "exec/breakpoint.h"
#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/memattrs.h"
#include "exec/mmu-access-type.h"
#include "exec/tlb-common.h"
#include "qapi/qapi-types-machine.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qom/object.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

/*
 * The class checkers bring in CPU_GET_CLASS() which is potentially
 * expensive given the eventual call to
 * object_class_dynamic_cast_assert(). Because of this the CPUState
 * has a cached value for the class in cs->cc which is set up in
 * cpu_exec_realizefn() for use in hot code paths.
 */
typedef struct CPUClass CPUClass;
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
                       TYPE_CPU)

/**
 * OBJECT_DECLARE_CPU_TYPE:
 * @CpuInstanceType: instance struct name
 * @CpuClassType: class struct name
 * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
 *
 * This macro is typically used in a "cpu-qom.h" header file, and will:
 *
 * - create the typedefs for the CPU object and class structs
 * - register the type for use with g_autoptr
 * - provide three standard type cast functions
 *
 * The object struct and class struct need to be declared manually.
 */
#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
    typedef struct ArchCPU CpuInstanceType; \
    OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
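/*
 * For example (illustrative sketch, mirroring how a target's cpu-qom.h
 * typically uses this macro):
 *
 *     #define TYPE_ARM_CPU "arm-cpu"
 *     OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
 */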
typedef struct CPUWatchpoint CPUWatchpoint;

/* see physmem.c */
struct CPUAddressSpace;

/* see accel/tcg/tb-jmp-cache.h */
struct CPUJumpCache;

/* see accel-cpu.h */
struct AccelCPUClass;

/* see sysemu-cpu-ops.h */
struct SysemuCPUOps;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 *   instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @mmu_index: Callback for choosing softmmu mmu index;
 *   may be used internally by memory_rw_debug without TCG.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @query_cpu_fast: Fill in target specific information for the
 *   "query-cpus-fast" QAPI call.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @set_pc: Callback for setting the Program Counter register. This
 *   should have the semantics used by the target architecture when
 *   setting the PC from a source such as an ELF file entry point;
 *   for example on Arm it will also set the Thumb mode bit based
 *   on the least significant bit of the new PC value.
 *   If the target behaviour here is anything other than "set
 *   the PC register to the value passed in" then the target must
 *   also implement the synchronize_from_tb hook.
 * @get_pc: Callback for getting the Program Counter register.
 *   As above, with the semantics of the target architecture.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
 *   breakpoint. Used by AVR to handle a gdb mis-feature with
 *   its Harvard architecture split code and data.
 * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
 *   from @gdb_core_xml_file.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 *   before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 *   to GDB. The caller must free the returned string with g_free.
 * @disas_set_info: Setup architecture specific components of disassembly info.
 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 *   address before attempting to match it against watchpoints.
 * @deprecation_note: If this CPUClass is deprecated, this field provides
 *   related information.
 *
 * Represents a CPU family or model.
 */
struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    bool (*has_work)(CPUState *cpu);
    int (*mmu_index)(CPUState *cpu, bool ifetch);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *, int flags);
    void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
    int64_t (*get_arch_id)(CPUState *cpu);
    void (*set_pc)(CPUState *cpu, vaddr value);
    vaddr (*get_pc)(CPUState *cpu);
    int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);

    const char *gdb_core_xml_file;
    const gchar * (*gdb_arch_name)(CPUState *cpu);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);

    const char *deprecation_note;
    struct AccelCPUClass *accel_cpu;

    /* when system emulation is not available, this pointer is NULL */
    const struct SysemuCPUOps *sysemu_ops;

    /* when TCG is not available, this pointer is NULL */
    const TCGCPUOps *tcg_ops;

    /*
     * if not NULL, this is called in order for the CPUClass to initialize
     * class data that depends on the accelerator, see accel/accel-common.c.
     */
    void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);

    /*
     * Keep non-pointer data at the end to minimize holes.
     */
    int reset_dump_flags;
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
};
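/*
 * A target's class_init typically installs these hooks.  A minimal
 * sketch, where the foo_cpu_* callbacks are hypothetical:
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = foo_cpu_class_by_name;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->dump_state = foo_cpu_dump_state;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->get_pc = foo_cpu_get_pc;
 *     }
 */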
/*
 * Fix the number of mmu modes to 16, which is also the maximum
 * supported by the softmmu tlb api.
 */
#define NB_MMU_MODES 16

/* Use a fully associative victim tlb of 8 entries. */
#define CPU_VTLB_SIZE 8

/*
 * The full TLB entry, which is not accessed by generated TCG code,
 * so the layout is not as critical as that of CPUTLBEntry. This is
 * also why we don't want to combine the two structs.
 */
struct CPUTLBEntryFull {
    /*
     * @xlat_section contains:
     *  - in the lower TARGET_PAGE_BITS, a physical section number
     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
     *    must be added to the virtual address to obtain:
     *     + the ram_addr_t of the target RAM (if the physical section
     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
     *     + the offset within the target MemoryRegion (otherwise)
     */
    hwaddr xlat_section;

    /*
     * @phys_addr contains the physical address in the address space
     * given by cpu_asidx_from_attrs(cpu, @attrs).
     */
    hwaddr phys_addr;

    /* @attrs contains the memory transaction attributes for the page. */
    MemTxAttrs attrs;

    /* @prot contains the complete protections for the page. */
    uint8_t prot;

    /* @lg_page_size contains the log2 of the page size. */
    uint8_t lg_page_size;

    /* Additional tlb flags requested by tlb_fill. */
    uint8_t tlb_fill_flags;

    /*
     * Additional tlb flags for use by the slow path. If non-zero,
     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
     */
    uint8_t slow_flags[MMU_ACCESS_COUNT];

    /*
     * Allow target-specific additions to this structure.
     * This may be used to cache items from the guest cpu
     * page tables for later use by the implementation.
     */
    union {
        /*
         * Cache the attrs and shareability fields from the page table entry.
         *
         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
         * For shareability and guarded, as in the SH and GP fields
         * respectively of the VMSAv8-64 PTEs.
         */
        struct {
            uint8_t pte_attrs;
            uint8_t shareability;
            bool guarded;
        } arm;
    } extra;
};
/*
 * Data elements that are per MMU mode, minus the bits accessed by
 * the TCG fast path.
 */
typedef struct CPUTLBDesc {
    /*
     * Describe a region covering all of the large pages allocated
     * into the tlb. When any page within this region is flushed,
     * we must flush the entire tlb. The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
    vaddr large_page_addr;
    vaddr large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    size_t n_used_entries;
    /* The next index to use in the tlb victim table. */
    size_t vindex;
    /* The tlb victim table, in two parts. */
    CPUTLBEntry vtable[CPU_VTLB_SIZE];
    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
    CPUTLBEntryFull *fulltlb;
} CPUTLBDesc;

/*
 * Data elements that are shared between all MMU modes.
 */
typedef struct CPUTLBCommon {
    /* Serialize updates to f.table and d.vtable, and others as noted. */
    QemuSpin lock;
    /*
     * Within dirty, for each bit N, modifications have been made to
     * mmu_idx N since the last time that mmu_idx was flushed.
     * Protected by tlb_c.lock.
     */
    uint16_t dirty;
    /*
     * Statistics.  These are not lock protected, but are read and
     * written atomically.  This allows the monitor to print a snapshot
     * of the stats without interfering with the cpu.
     */
    size_t full_flush_count;
    size_t part_flush_count;
    size_t elide_flush_count;
} CPUTLBCommon;

/*
 * The entire softmmu tlb, for all MMU modes.
 * The meaning of each of the MMU modes is defined in the target code.
 * Since this is placed within CPUNegativeOffsetState, the smallest
 * negative offsets are at the end of the struct.
 */
typedef struct CPUTLB {
#ifdef CONFIG_TCG
    CPUTLBCommon c;
    CPUTLBDesc d[NB_MMU_MODES];
    CPUTLBDescFast f[NB_MMU_MODES];
#endif
} CPUTLB;

/*
 * Low 16 bits: number of cycles left, used only in icount mode.
 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
 * for this CPU and return to its top level loop (even in non-icount mode).
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 */
typedef union IcountDecr {
    uint32_t u32;
    struct {
#if HOST_BIG_ENDIAN
        uint16_t high;
        uint16_t low;
#else
        uint16_t low;
        uint16_t high;
#endif
    } u16;
} IcountDecr;
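/*
 * Illustrative sketch of how the union is used: to force a vCPU out of
 * the fast path, a requester writes -1 to the high half, which the TB
 * prologue tests together with the icount budget in a single 32-bit
 * load (field path per the CPUState layout declared below):
 *
 *     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 */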
/**
 * CPUNegativeOffsetState: Elements of CPUState most efficiently accessed
 *                         from CPUArchState, via small negative offsets.
 * @can_do_io: True if memory-mapped IO is allowed.
 * @plugin_mem_cbs: active plugin memory callbacks
 * @plugin_mem_value_low: 64 lower bits of latest accessed mem value.
 * @plugin_mem_value_high: 64 higher bits of latest accessed mem value.
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
#ifdef CONFIG_PLUGIN
    /*
     * The callback pointer is accessed via TCG (see gen_empty_mem_helper).
     */
    GArray *plugin_mem_cbs;
    uint64_t plugin_mem_value_low;
    uint64_t plugin_mem_value_high;
#endif
    IcountDecr icount_decr;
    bool can_do_io;
} CPUNegativeOffsetState;

struct KVMState;
struct kvm_run;

/* work queue */

/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
    int host_int;
    unsigned long host_ulong;
    void *host_ptr;
    vaddr target_ptr;
} run_on_cpu_data;

#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)

typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);

struct qemu_work_item;

#define CPU_UNSET_NUMA_NODE_ID -1
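/*
 * Illustrative sketch of packing a value into a work item (run_on_cpu()
 * and friends are declared further down in this header); the set_halted
 * callback is hypothetical:
 *
 *     static void set_halted(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         cpu->halted = data.host_int;
 *     }
 *
 *     run_on_cpu(cs, set_halted, RUN_ON_CPU_HOST_INT(1));
 */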
/**
 * struct CPUState - common state of one CPU core or thread.
 *
 * @cpu_index: CPU index (informative).
 * @cluster_index: Identifies which cluster this CPU is in.
 *   For boards which don't define clusters or for "loose" CPUs not assigned
 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 *   be the same as the cluster-id property of the CPU object's
 *   TYPE_CPU_CLUSTER QOM parent.
 *   Under TCG this value is propagated to @tcg_cflags.
 *   See the CF_CLUSTER_MASK bits of TranslationBlock::cflags.
 * @tcg_cflags: Pre-computed cflags for this cpu.
 * @nr_threads: Number of threads within this CPU core.
 * @thread: Host thread details, only live once @created is #true
 * @sem: WIN32 only semaphore used only for qtest
 * @thread_id: native thread id of vCPU, only live once @created is #true
 * @running: #true if CPU is currently running (lockless).
 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 *   valid under cpu_list_lock.
 * @created: Indicates whether the CPU thread has been successfully created.
 * @halt_cond: condition variable sleeping threads can wait on.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @unplug: Indicates a pending CPU unplug request.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *   AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets
 *   which only have a single AddressSpace
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @node: QTAILQ of CPUs sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @accel: Pointer to accelerator specific state.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to @work_list.
 * @work_list: List of pending asynchronous work.
 * @plugin_state: per-CPU plugin state
 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 *   flag of the same name: allows the board to suppress calling of the
 *   CPU do_transaction_failed hook function.
 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
 *   ring is enabled.
 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
 *   dirty ring structure.
 *
 * @neg_align: The CPUState is the common part of a concrete ArchCPU
 * which is allocated when an individual CPU instance is created. As
 * such, care is taken to ensure there is no gap between CPUState and
 * CPUArchState within ArchCPU.
 *
 * @neg: The architectural register state ("cpu_env") immediately follows
 * CPUState in ArchCPU and is passed to TCG code. The @neg structure holds
 * some common TCG CPU variables which are accessed with a negative offset
 * from cpu_env.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /* cache to avoid expensive CPU_GET_CLASS */
    CPUClass *cc;
    /*< public >*/

    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    QemuSemaphore sem;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;

    /* Should CPU start in powered-off state? */
    bool start_powered_off;

    bool unplug;
    bool crash_occurred;
    bool exit_request;
    int exclusive_context_count;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    uint64_t random_seed;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    QSIMPLEQ_HEAD(, qemu_work_item) work_list;

    struct CPUAddressSpace *cpu_ases;
    int cpu_ases_count;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    struct CPUJumpCache *tb_jmp_cache;

    GArray *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;

    /* Only used in KVM */
    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;
    struct kvm_dirty_gfn *kvm_dirty_gfns;
    uint32_t kvm_fetch_index;
    uint64_t dirty_pages;
    int kvm_vcpu_stats_fd;
    bool vcpu_dirty;

    /* Used by accel-block: CPU is executing an ioctl() */
    QemuLockCnt in_ioctl_lock;

#ifdef CONFIG_PLUGIN
    CPUPluginState *plugin_state;
#endif

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t tcg_cflags;
    uint32_t halted;
    int32_t exception_index;

    AccelCPUState *accel;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /*
     * Sleep throttle_us_per_full microseconds once dirty ring is full
     * if dirty page rate limit is enabled.
     */
    int64_t throttle_us_per_full;

    bool ignore_memory_transaction_failures;

    /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
    bool prctl_unalign_sigbus;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;

    /*
     * MUST BE LAST in order to minimize the displacement to CPUArchState.
     */
    char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
    CPUNegativeOffsetState neg;
};

/* Validate placement of CPUNegativeOffsetState. */
QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
                  sizeof(CPUState) - sizeof(CPUNegativeOffsetState));

static inline CPUArchState *cpu_env(CPUState *cpu)
{
    /* We validate that CPUArchState follows CPUState in cpu-all.h. */
    return (CPUArchState *)(cpu + 1);
}
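/*
 * Illustrative sketch of moving between the two views of an ArchCPU;
 * env_cpu(), the reverse conversion, is assumed here to be the helper
 * declared in "exec/cpu-all.h":
 *
 *     CPUArchState *env = cpu_env(cs);
 *     CPUState *same_cs = env_cpu(env);
 */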
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus_queue;

#define first_cpu        QTAILQ_FIRST_RCU(&cpus_queue)
#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)
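/*
 * Illustrative sketch: walking every CPU under RCU protection
 * (RCU_READ_LOCK_GUARD() comes from "qemu/rcu.h"):
 *
 *     CPUState *cs;
 *
 *     RCU_READ_LOCK_GUARD();
 *     CPU_FOREACH(cs) {
 *         qemu_cpu_kick(cs);
 *     }
 */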
extern __thread CPUState *current_cpu;

/**
 * qemu_tcg_mttcg_enabled:
 * Check whether we are running MultiThread TCG or not.
 *
 * Returns: %true if we are in MTTCG mode, %false otherwise.
 */
extern bool mttcg_enabled;
#define qemu_tcg_mttcg_enabled() (mttcg_enabled)

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 *
 * Returns: %true on success, %false otherwise.
 */
bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

#if !defined(CONFIG_USER_ONLY)

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_get_crash_info:
 * @cpu: The CPU to get crash information for
 *
 * Gets the previously saved crash information.
 * Caller is responsible for freeing the data.
 */
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);

#endif /* !CONFIG_USER_ONLY */

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: dump the guest code around the current PC
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 * @CPU_DUMP_VPU: dump VPU registers
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
    CPU_DUMP_VPU  = 0x00080000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: If non-null, dump to this stream, else to current print sink.
 * @flags: #CPUDumpFlags selecting what to include in the dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
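/*
 * Illustrative sketch: dump integer and FPU state, plus the guest code
 * around the current PC, to stderr:
 *
 *     cpu_dump_state(cs, stderr, CPU_DUMP_CODE | CPU_DUMP_FPU);
 */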
#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *   for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                     MemTxAttrs *attrs);

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

/**
 * cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);

/**
 * cpu_virtio_is_big_endian:
 * @cpu: CPU
 *
 * Returns %true if a CPU which supports runtime configurable endianness
 * is currently big-endian.
 */
bool cpu_virtio_is_big_endian(CPUState *cpu);

#endif /* CONFIG_USER_ONLY */

/**
 * cpu_list_add:
 * @cpu: The CPU to be added to the list of CPUs.
 */
void cpu_list_add(CPUState *cpu);

/**
 * cpu_list_remove:
 * @cpu: The CPU to be removed from the list of CPUs.
 */
void cpu_list_remove(CPUState *cpu);

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A concrete #CPUClass or %NULL if no matching class is found
 * or if the matching class is abstract.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_model_from_type:
 * @typename: The CPU type name
 *
 * Extract the CPU model name from the CPU type name. The
 * CPU type name is either the combination of the CPU model
 * name and suffix, or the same as the CPU model name.
 *
 * Returns: CPU model name or %NULL if the CPU class doesn't exist.
 * The caller should g_free() the string once it is no longer needed.
 */
char *cpu_model_from_type(const char *typename);

/**
 * cpu_create:
 * @typename: The CPU type.
 *
 * Instantiates and realizes a CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_create(const char *typename);

/**
 * parse_cpu_option:
 * @cpu_option: The -cpu option including optional parameters.
 *
 * Processes optional parameters and registers them as global properties.
 *
 * Returns: type of CPU to create; prints an error and terminates the
 * process if an error occurs.
 */
const char *parse_cpu_option(const char *cpu_option);
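/*
 * Illustrative sketch: turning a -cpu option string into a realized CPU,
 * roughly the way board code typically does:
 *
 *     const char *cpu_type = parse_cpu_option("cortex-a15");
 *     CPUState *cs = cpu_create(cpu_type);
 */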
/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * do_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 * @mutex: Mutex to release while waiting for @func to run.
 *
 * Used internally in the implementation of run_on_cpu.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu
 * and waits for it to complete.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                      run_on_cpu_data data);

/**
 * async_safe_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 * while all other vCPUs are sleeping.
 *
 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 * BQL.
 */
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data);

/**
 * cpu_in_exclusive_context()
 * @cpu: The vCPU to check
 *
 * Returns true if @cpu is in an exclusive context, for example running
 * something which has previously been queued via async_safe_run_on_cpu().
 */
static inline bool cpu_in_exclusive_context(const CPUState *cpu)
{
    return cpu->exclusive_context_count;
}
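/*
 * Illustrative sketch: queueing work that must not race with any other
 * vCPU; the do_patch callback and buf are hypothetical:
 *
 *     static void do_patch(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         g_assert(cpu_in_exclusive_context(cpu));
 *         ... every other vCPU is quiescent here ...
 *     }
 *
 *     async_safe_run_on_cpu(cs, do_patch, RUN_ON_CPU_HOST_PTR(buf));
 */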
/**
 * qemu_get_cpu:
 * @index: The CPUState::cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_by_arch_id:
 * @id: Guest-exposed CPU ID of the CPU to obtain.
 *
 * Get a CPU with matching @id.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *cpu_by_arch_id(int64_t id);

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
void cpu_interrupt(CPUState *cpu, int mask);

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_pause:
 * @cpu: The CPU to pause.
 *
 * Pauses CPU, i.e. puts CPU into stopped state.
 */
void cpu_pause(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * cpu_remove_sync:
 * @cpu: The CPU to remove.
 *
 * Requests the CPU to be removed and waits till it is removed.
 */
void cpu_remove_sync(CPUState *cpu);

/**
 * free_queued_cpu_work() - free all items on CPU work queue
 * @cpu: The CPU whose work queue to free.
 */
void free_queued_cpu_work(CPUState *cpu);

/**
 * process_queued_cpu_work() - process all items on CPU work queue
 * @cpu: The CPU whose work queue to process.
 */
void process_queued_cpu_work(CPUState *cpu);

/**
 * cpu_exec_start:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has started execution and can be interrupted with
 * cpu_exit.
 */
void cpu_exec_start(CPUState *cpu);

/**
 * cpu_exec_end:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has stopped execution and exclusive sections
 * can be executed without interrupting it.
 */
void cpu_exec_end(CPUState *cpu);

/**
 * start_exclusive:
 *
 * Wait for a concurrent exclusive section to end, and then start
 * a section of work that is run while other CPUs are not running
 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
 * during the exclusive section go to sleep until this CPU calls
 * end_exclusive.
 */
void start_exclusive(void);

/**
 * end_exclusive:
 *
 * Concludes an exclusive execution section started by start_exclusive.
 */
void end_exclusive(void);
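/*
 * Illustrative sketch of the pairing: a thread that is not itself
 * between cpu_exec_start()/cpu_exec_end() can exclude all running
 * vCPUs with:
 *
 *     start_exclusive();
 *     ... work that must observe no concurrently running vCPU ...
 *     end_exclusive();
 */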
/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_HIT_SHIFT          6
#define BP_WATCHPOINT_HIT_READ  (BP_MEM_READ << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT       (BP_MEM_ACCESS << BP_HIT_SHIFT)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

#if defined(CONFIG_USER_ONLY)
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                        int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}

static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                                        vaddr len, int flags)
{
    return -ENOSYS;
}

static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
                                                CPUWatchpoint *wp)
{
}

static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
#endif

/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
    G_GNUC_PRINTF(2, 3);

/* $(top_srcdir)/cpu.c */
void cpu_class_init_props(DeviceClass *dc);
void cpu_exec_initfn(CPUState *cpu);
bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
void cpu_exec_reset_hold(CPUState *cpu);

const char *target_name(void);

#ifdef COMPILING_PER_TARGET

#ifndef CONFIG_USER_ONLY

extern const VMStateDescription vmstate_cpu_common;

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
#endif /* !CONFIG_USER_ONLY */

#endif /* COMPILING_PER_TARGET */

#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1

#endif /* QEMU_CPU_H */