/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

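/*
 * helper_retaddr is set around host memory accesses via
 * set_helper_retaddr()/clear_helper_retaddr() (see cpu_mmu_lookup()
 * below), so that adjust_signal_pc() can unwind a SIGSEGV taken
 * during the access to the correct guest pc.
 */
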
//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

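/*
 * Illustrative sketch (not code from this file): a host SIGSEGV handler
 * would typically use adjust_signal_pc() along these lines, with
 * host_signal_pc() and host_signal_write() standing in for the
 * host-specific ucontext accessors:
 *
 *     uintptr_t pc = host_signal_pc(uc);
 *     bool is_write = host_signal_write(info, uc);
 *     MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
 *     ...
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
 */
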
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

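/*
 * Illustrative sketch (not code from this file): the host signal
 * handler is expected to try this function only for a write access
 * fault, and otherwise deliver the signal to the guest:
 *
 *     if (!maperr && access_type == MMU_DATA_STORE &&
 *         handle_sigsegv_accerr_write(cpu, host_sigmask, pc, guest_addr)) {
 *         return;   ... retry the faulting access ...
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
 */
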
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

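/*
 * Illustrative sketch (hypothetical caller): with nonfault=true a
 * target helper can probe without raising the guest exception:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... page not accessible, take a target-specific slow path ...
 *     }
 */
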
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one chunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TPD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong end)
{
    IntervalTreeNode *n, *next;
    target_ulong last;

    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    last = TARGET_PAGE_ALIGN(end) - 1;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t;

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free(n);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        t = container_of(n, TargetPageDataNode, itree);
        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TPD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TPD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
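
/*
 * Illustrative sketch: TARGET_PAGE_DATA_SIZE is defined only by targets
 * that keep per-page metadata (e.g. MTE tags on aarch64); such a target
 * fetches the page's buffer with
 *
 *     char *data = page_get_target_data(guest_addr);
 *
 * The storage is zero-initialized on first allocation (g_new0) and
 * cleared again by page_reset_target_data() when a range is reset.
 */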
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

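/*
 * Illustrative sketch: callers build the MemOpIdx with make_memop_idx(),
 * e.g. for a big-endian 4-byte load:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, mmu_idx);
 *     uint32_t x = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * validate_memop() then asserts that the size/endianness encoded in
 * that oi match the function actually called.
 */
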
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

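/*
 * Note the common shape of each accessor below: cpu_mmu_lookup() checks
 * alignment and sets helper_retaddr, the host load/store is performed,
 * and clear_helper_retaddr() runs before the plugin callback, so that a
 * SIGSEGV taken during the host access unwinds to the correct guest pc.
 */
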
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

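/*
 * The code-load accessors below pass 1 to set_helper_retaddr(); per
 * adjust_signal_pc() above, a fault with helper_retaddr == 1 is treated
 * as MMU_INST_FETCH during translation rather than as a data access.
 */
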
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

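/*
 * Illustrative sketch: for each DATA_SIZE, atomic_template.h expands
 * helpers that use the lookup above, roughly:
 *
 *     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
 *                                          PAGE_READ | PAGE_WRITE, retaddr);
 *     DATA_TYPE ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *     ATOMIC_MMU_CLEANUP;
 */
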
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif