/*
 * CPU interfaces that are target independent.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1+
 */
#ifndef CPU_COMMON_H
#define CPU_COMMON_H

#include "exec/vaddr.h"
#include "exec/hwaddr.h"
#include "hw/core/cpu.h"
#include "tcg/debug-assert.h"
#include "exec/page-protection.h"

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */

void cpu_exec_init_all(void);
void cpu_exec_step_atomic(CPUState *cpu);

#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
unsigned int cpu_list_generation_id_get(void);

int cpu_get_free_index(void);

void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if HOST_BIG_ENDIAN
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif

/* memory API */

void qemu_ram_remap(ram_addr_t addr);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);

/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * @ptr: The host pointer to translate.
 * @round_offset: Whether to round the result offset down to a target page
 * @offset: Will be set to the offset within the returned RAMBlock.
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore. If the caller is not within an RCU critical section and
 * does not hold the BQL, it must have other means of protecting the
 * pointer, such as a reference to the memory region that owns the RAMBlock.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
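/*
 * Illustrative usage sketch (not part of the API above): because of the RCU
 * note above, a caller that does not hold the BQL would typically keep the
 * RCU read lock across both the lookup and the use of the result. Here
 * "host_ptr" is a hypothetical host pointer into guest RAM:
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb;
 *
 *     RCU_READ_LOCK_GUARD();
 *     rb = qemu_ram_block_from_host(host_ptr, false, &offset);
 *     if (rb) {
 *         ... use rb and offset while still inside the RCU section ...
 *     }
 */
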
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);
bool qemu_ram_is_named_file(RAMBlock *rb);
int qemu_ram_get_fd(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
/**
 * cpu_address_space_destroy:
 * @cpu: CPU for which address space needs to be destroyed
 * @asidx: integer index of this address space
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_destroy(CPUState *cpu, int asidx);

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
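/*
 * Illustrative usage sketch (not part of the API above): the map/unmap pair
 * avoids per-access copies for larger transfers. The mapped length returned
 * in *plen may be smaller than requested, and the mapping may be a temporary
 * bounce buffer, so the length must be checked and the buffer must always be
 * unmapped again. "addr" and "size" here are hypothetical:
 *
 *     hwaddr mapped_len = size;
 *     void *buf = cpu_physical_memory_map(addr, &mapped_len, false);
 *
 *     if (buf) {
 *         ... read up to mapped_len bytes from buf ...
 *         cpu_physical_memory_unmap(buf, mapped_len, false, mapped_len);
 *     }
 */
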
bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
                                        size_t length);

/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write);

/* vl.c */
void list_cpus(void);

#ifdef CONFIG_TCG
#include "qemu/atomic.h"

/**
 * cpu_unwind_state_data:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @data: output data
 *
 * Attempt to load the unwind state for a host pc occurring in
 * translated code. If @host_pc is not in translated code, the
 * function returns false; otherwise @data is loaded.
 * This is the same unwind info as given to restore_state_to_opc.
 */
bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);

/**
 * cpu_restore_state:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @host_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}

G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
#endif /* CONFIG_TCG */
G_NORETURN void cpu_loop_exit(CPUState *cpu);
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);

/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}

/**
 * env_cpu_const(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline const CPUState *env_cpu_const(const CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}

/**
 * env_cpu(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return (CPUState *)env_cpu_const(env);
}

#endif /* CPU_COMMON_H */
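/*
 * Illustrative sketch (an assumption spelled out for clarity, not a
 * definition from this header): env_archcpu() and env_cpu() above rely on
 * each target's ArchCPU placing its CPUState first and its CPUArchState
 * immediately after it, roughly:
 *
 *     struct ArchCPU {
 *         CPUState parent_obj;     at offset 0
 *         CPUArchState env;        at offset sizeof(CPUState)
 *         ...
 *     };
 *
 * which is why subtracting sizeof(CPUState) from an env pointer recovers
 * the containing CPUState.
 */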