142a623c7SBlue Swirl /* 242a623c7SBlue Swirl * User emulator execution 342a623c7SBlue Swirl * 442a623c7SBlue Swirl * Copyright (c) 2003-2005 Fabrice Bellard 542a623c7SBlue Swirl * 642a623c7SBlue Swirl * This library is free software; you can redistribute it and/or 742a623c7SBlue Swirl * modify it under the terms of the GNU Lesser General Public 842a623c7SBlue Swirl * License as published by the Free Software Foundation; either 9fb0343d5SThomas Huth * version 2.1 of the License, or (at your option) any later version. 1042a623c7SBlue Swirl * 1142a623c7SBlue Swirl * This library is distributed in the hope that it will be useful, 1242a623c7SBlue Swirl * but WITHOUT ANY WARRANTY; without even the implied warranty of 1342a623c7SBlue Swirl * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1442a623c7SBlue Swirl * Lesser General Public License for more details. 1542a623c7SBlue Swirl * 1642a623c7SBlue Swirl * You should have received a copy of the GNU Lesser General Public 1742a623c7SBlue Swirl * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
 */

#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

/*
 * Per-thread marker describing what the thread was doing when a host
 * SIGSEGV/SIGBUS arrives.  Interpreted by adjust_signal_pc() below:
 *   0     - executing generated code (or an unrelated qemu bug),
 *   1     - reading guest memory for translation ("execution"),
 *   other - the host return address of an in-progress helper function.
 */
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 *
 * @pc: in/out - on entry the host pc from the signal frame; on return
 *      the value to hand to the unwinder (0 to suppress unwinding).
 * @is_write: whether the faulting host access was a write.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    /* Dispatch on how page_unprotect() classified the faulting page. */
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

/*
 * Check whether a page at @addr may be accessed as @access_type.
 * Returns 0 on success, TLB_INVALID_MASK if @nonfault and the access
 * is not permitted, and otherwise raises SIGSEGV on the guest and
 * does not return.  @fault_size is currently unused here.
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    /* Map the access type to the page-flag bit that must be set. */
    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        /* Distinguish "not mapped" from "mapped without permission". */
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    /* Does not return. */
    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

/*
 * Probe for an access without faulting unless @nonfault is clear.
 * On success *@phost is the host address for @addr; on a nonfault
 * failure *@phost is NULL and TLB_INVALID_MASK is returned.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

/*
 * Probe for an access of @size bytes; raises the guest SIGSEGV on
 * failure (never returns in that case).  Returns the host address,
 * or NULL when @size is 0.  The access must not cross a page boundary.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    /* The access must fit within a single guest page. */
    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    /* Compare only size and byte-swap; alignment bits are not checked. */
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

/* Called from generated code for an unaligned load; raises guest SIGBUS. */
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

/* Called from generated code for an unaligned store; raises guest SIGBUS. */
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

/*
 * Translate a guest address to a host address for a load/store helper,
 * enforcing the guest-required alignment encoded in @oi (SIGBUS on
 * violation).  On return helper_retaddr is set to @ra; the caller must
 * clear_helper_retaddr() once the host access is complete.
 */
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

/* Load an unsigned byte from guest memory, with tracing/plugin hooks. */
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a big-endian 16-bit value from guest memory. */
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a big-endian 32-bit value from guest memory. */
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a big-endian 64-bit value from guest memory. */
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a little-endian 16-bit value from guest memory. */
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a little-endian 32-bit value from guest memory. */
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Load a little-endian 64-bit value from guest memory. */
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/* Store a byte to guest memory, with tracing/plugin hooks. */
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a big-endian 16-bit value to guest memory. */
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a big-endian 32-bit value to guest memory. */
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a big-endian 64-bit value to guest memory. */
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a little-endian 16-bit value to guest memory. */
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a little-endian 32-bit value to guest memory. */
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/* Store a little-endian 64-bit value to guest memory. */
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/*
 * Code (instruction-fetch) loads.  helper_retaddr is set to 1 so that
 * a host fault is classified as "fault during translation" by
 * adjust_signal_pc (case 1) rather than unwound through a helper.
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        /* Report the fault as a load for read-only probes, else a store. */
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    /* helper_retaddr stays set until ATOMIC_MMU_CLEANUP clears it. */
    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif