/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "internal.h"

/*
 * Per-thread marker describing what kind of host memory access is in
 * flight, consumed by adjust_signal_pc() when a SIGSEGV arrives:
 *   0        -> access from generated code,
 *   1        -> host read for translation (instruction fetch),
 *   anything else -> host return address of a helper function.
 */
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, a unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

/*
 * Check whether @addr may be accessed as @access_type.
 * Returns 0 on success, TLB_INVALID_MASK if the access is not permitted
 * and @nonfault is set; otherwise does not return (raises SIGSEGV for
 * the guest via cpu_loop_exit_sigsegv).
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        /* Distinguish "no mapping" (SEGV_MAPERR) from "bad permission". */
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

/*
 * Translate guest @addr to a host pointer and arm helper_retaddr with @ra
 * so that a fault inside the access unwinds correctly (see
 * adjust_signal_pc).  Callers must clear_helper_retaddr() afterwards.
 */
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    void *ret;

    /* TODO: Enforce guest required alignment.  */

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

/*
 * Code-fetch helpers.  set_helper_retaddr(1) marks the access as a host
 * read for translation, so that adjust_signal_pc (case 1) does not
 * trigger the unwinder on a fault.
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif