/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal-common.h"
#include "internal-target.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within QEMU, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

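/*
 * Illustrative sketch of the expected caller, the host SIGSEGV/SIGBUS
 * handler; host_signal_pc() and host_signal_write() stand in for the
 * per-host accessors that extract the faulting pc and access direction
 * from the signal frame:
 *
 *     uintptr_t pc = host_signal_pc(uc);
 *     MMUAccessType access_type =
 *         adjust_signal_pc(&pc, host_signal_write(info, uc));
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
 */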
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

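/*
 * Usage sketch, assuming the caller is the host SIGSEGV handler and
 * that the fault was reported as SEGV_ACCERR (surrounding variable
 * names are illustrative):
 *
 *     if (info->si_code == SEGV_ACCERR
 *         && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
 *                                        host_pc, guest_addr)) {
 *         return;  // fault handled; retry the access
 *     }
 */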
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

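/*
 * Note the shape of the lookup above; it recurs throughout this file.
 * A lockless interval tree lookup can return a false negative, so a
 * miss is only conclusive if the mmap lock is already held; otherwise
 * the reader must take the lock and retry before concluding that no
 * mapping exists.
 */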
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

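/*
 * Worked example (illustrative addresses): with one node covering
 * [0x0000, 0x5fff], pageflags_unset(0x2000, 0x3fff) takes the first
 * branch: the node is truncated to [0x0000, 0x1fff] and, since
 * last < p_last, the remainder is split out as a new node
 * [0x4000, 0x5fff] carrying the original flags.
 */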
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

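/*
 * For example (illustrative addresses): creating [0x2000, 0x2fff] with
 * the same flags as existing neighbors [0x1000, 0x1fff] and
 * [0x3000, 0x3fff] collapses all three into a single node
 * [0x1000, 0x3fff]; a neighbor whose flags differ is left untouched
 * and no merge happens on that side.
 */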
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

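/*
 * Example: a page created by an anonymous mmap carries PAGE_ANON, and a
 * later mprotect() over it (no PAGE_RESET) must preserve that bit,
 * while mapping over the same range anew (PAGE_RESET set) clears it
 * unless the new mapping supplies it again.
 */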
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}

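/*
 * Typical use, as an illustrative sketch only: the guest mmap path
 * converts PROT_* bits into PAGE_* bits (the helper name below is
 * hypothetical) and registers the fresh mapping, while munmap drops
 * the range by passing flags == 0.
 *
 *     page_set_flags(start, start + len - 1,
 *                    prot_to_page_flags(prot) | PAGE_VALID | PAGE_RESET);
 *     ...
 *     page_set_flags(start, start + len - 1, 0);   // munmap
 */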
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false; /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}

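/*
 * Usage sketch (illustrative): syscall emulation validates guest
 * buffers with this check before touching them, e.g.
 *
 *     if (!page_check_range(guest_buf, size, PAGE_READ | PAGE_WRITE)) {
 *         return -TARGET_EFAULT;
 *     }
 */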
bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}

target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align)
{
    target_ulong len_m1, align_m1;

    assert(min <= max);
    assert(max <= GUEST_ADDR_MAX);
    assert(len != 0);
    assert(is_power_of_2(align));
    assert_memory_lock();

    len_m1 = len - 1;
    align_m1 = align - 1;

    /* Iteratively narrow the search region. */
    while (1) {
        PageFlagsNode *p;

        /* Align min and double-check there's enough space remaining. */
        min = (min + align_m1) & ~align_m1;
        if (min > max) {
            return -1;
        }
        if (len_m1 > max - min) {
            return -1;
        }

        p = pageflags_find(min, min + len_m1);
        if (p == NULL) {
            /* Found! */
            return min;
        }
        if (max <= p->itree.last) {
            /* Existing allocation fills the remainder of the search region. */
            return -1;
        }
        /* Skip across existing allocation. */
        min = p->itree.last + 1;
    }
}

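/*
 * Worked example (illustrative numbers): with min = 0x1000,
 * max = 0xffff, len = 0x2000 and align = 0x4000, the first pass
 * rounds min up to 0x4000; if a node covers [0x4000, 0x4fff], min
 * advances to 0x5000 and is re-aligned to 0x8000 on the next pass,
 * and so on until a sufficiently large gap is found or the search
 * region is exhausted (return -1).
 */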
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

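/*
 * Nonfault usage sketch (variable names illustrative):
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         // not accessible: host is NULL and the caller handles it
 *     }
 *
 * With nonfault == false, the function either succeeds or does not
 * return (cpu_loop_exit_sigsegv raises the guest signal).
 */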
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert((flags & ~TLB_MMIO) == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one chunk per page, we have overhead of 40/128, or
 * roughly 30%.  Therefore, allocate memory for 64 pages at a time
 * for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

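/*
 * The arithmetic behind the comment above, assuming the current user is
 * aarch64 MTE with TARGET_PAGE_DATA_SIZE = 128 bytes and roughly 40
 * bytes of per-node metadata (rcu head plus interval tree node): one
 * node per page costs about 40 extra bytes per 128 bytes of data,
 * while one node per 64 pages costs about 40 / (64 * 128), well
 * under 1%.
 */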
862f88f3ac9SRichard Henderson typedef struct TargetPageDataNode {
863177a8cb8SRichard Henderson     struct rcu_head rcu;
864f88f3ac9SRichard Henderson     IntervalTreeNode itree;
865f88f3ac9SRichard Henderson     char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
866f88f3ac9SRichard Henderson } TargetPageDataNode;
867f88f3ac9SRichard Henderson 
868f88f3ac9SRichard Henderson static IntervalTreeRoot targetdata_root;
869f88f3ac9SRichard Henderson 
87010310cbdSRichard Henderson void page_reset_target_data(target_ulong start, target_ulong last)
8710fe61084SRichard Henderson {
872f88f3ac9SRichard Henderson     IntervalTreeNode *n, *next;
8730fe61084SRichard Henderson 
8740fe61084SRichard Henderson     assert_memory_lock();
8750fe61084SRichard Henderson 
87610310cbdSRichard Henderson     start &= TARGET_PAGE_MASK;
87710310cbdSRichard Henderson     last |= ~TARGET_PAGE_MASK;
8780fe61084SRichard Henderson 
879f88f3ac9SRichard Henderson     for (n = interval_tree_iter_first(&targetdata_root, start, last),
880f88f3ac9SRichard Henderson          next = n ? interval_tree_iter_next(n, start, last) : NULL;
881f88f3ac9SRichard Henderson          n != NULL;
882f88f3ac9SRichard Henderson          n = next,
883f88f3ac9SRichard Henderson          next = next ? interval_tree_iter_next(n, start, last) : NULL) {
884f88f3ac9SRichard Henderson         target_ulong n_start, n_last, p_ofs, p_len;
885177a8cb8SRichard Henderson         TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
8860fe61084SRichard Henderson 
887f88f3ac9SRichard Henderson         if (n->start >= start && n->last <= last) {
888f88f3ac9SRichard Henderson             interval_tree_remove(n, &targetdata_root);
889177a8cb8SRichard Henderson             g_free_rcu(t, rcu);
890f88f3ac9SRichard Henderson             continue;
8910fe61084SRichard Henderson         }
8920fe61084SRichard Henderson 
893f88f3ac9SRichard Henderson         if (n->start < start) {
894f88f3ac9SRichard Henderson             n_start = start;
895f88f3ac9SRichard Henderson             p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
896f88f3ac9SRichard Henderson         } else {
897f88f3ac9SRichard Henderson             n_start = n->start;
898f88f3ac9SRichard Henderson             p_ofs = 0;
899f88f3ac9SRichard Henderson         }
900f88f3ac9SRichard Henderson         n_last = MIN(last, n->last);
901f88f3ac9SRichard Henderson         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
902f88f3ac9SRichard Henderson 
903f88f3ac9SRichard Henderson         memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
904f88f3ac9SRichard Henderson     }
905f88f3ac9SRichard Henderson }
906f88f3ac9SRichard Henderson 
9070fe61084SRichard Henderson void *page_get_target_data(target_ulong address)
9080fe61084SRichard Henderson {
909f88f3ac9SRichard Henderson     IntervalTreeNode *n;
910f88f3ac9SRichard Henderson     TargetPageDataNode *t;
911f88f3ac9SRichard Henderson     target_ulong page, region;
9120fe61084SRichard Henderson 
913f88f3ac9SRichard Henderson     page = address & TARGET_PAGE_MASK;
914f88f3ac9SRichard Henderson     region = address & TBD_MASK;
915f88f3ac9SRichard Henderson 
916f88f3ac9SRichard Henderson     n = interval_tree_iter_first(&targetdata_root, page, page);
917f88f3ac9SRichard Henderson     if (!n) {
918f88f3ac9SRichard Henderson         /*
919f88f3ac9SRichard Henderson          * See util/interval-tree.c re lockless lookups: no false positives
920f88f3ac9SRichard Henderson          * but there are false negatives.  If we find nothing, retry with
921f88f3ac9SRichard Henderson          * the mmap lock acquired.  We also need the lock for the
922f88f3ac9SRichard Henderson          * allocation + insert.
923f88f3ac9SRichard Henderson          */
924f88f3ac9SRichard Henderson         mmap_lock();
925f88f3ac9SRichard Henderson         n = interval_tree_iter_first(&targetdata_root, page, page);
926f88f3ac9SRichard Henderson         if (!n) {
927f88f3ac9SRichard Henderson             t = g_new0(TargetPageDataNode, 1);
928f88f3ac9SRichard Henderson             n = &t->itree;
929f88f3ac9SRichard Henderson             n->start = region;
930f88f3ac9SRichard Henderson             n->last = region | ~TBD_MASK;
931f88f3ac9SRichard Henderson             interval_tree_insert(n, &targetdata_root);
9320fe61084SRichard Henderson         }
933f88f3ac9SRichard Henderson         mmap_unlock();
9340fe61084SRichard Henderson     }
935f88f3ac9SRichard Henderson 
936f88f3ac9SRichard Henderson     t = container_of(n, TargetPageDataNode, itree);
937f88f3ac9SRichard Henderson     return t->data[(page - region) >> TARGET_PAGE_BITS];
938f88f3ac9SRichard Henderson }
939f88f3ac9SRichard Henderson #else
94010310cbdSRichard Henderson void page_reset_target_data(target_ulong start, target_ulong last) { }
941f88f3ac9SRichard Henderson #endif /* TARGET_PAGE_DATA_SIZE */
9420fe61084SRichard Henderson 
943*7893e42dSPhilippe Mathieu-Daudé /* The system-mode versions of these helpers are in cputlb.c.  */
944a411d296SPhilippe Mathieu-Daudé 
945e20f73fbSAnton Johansson static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
946de95016dSRichard Henderson                             MemOp mop, uintptr_t ra, MMUAccessType type)
947f83bcecbSRichard Henderson {
9489395cd0aSRichard Henderson     int a_bits = get_alignment_bits(mop);
949f83bcecbSRichard Henderson     void *ret;
950f83bcecbSRichard Henderson 
9519395cd0aSRichard Henderson     /* Enforce guest required alignment.  */
9529395cd0aSRichard Henderson     if (unlikely(addr & ((1 << a_bits) - 1))) {
953e20f73fbSAnton Johansson         cpu_loop_exit_sigbus(cpu, addr, type, ra);
9549395cd0aSRichard Henderson     }
955f83bcecbSRichard Henderson 
956e20f73fbSAnton Johansson     ret = g2h(cpu, addr);
957f83bcecbSRichard Henderson     set_helper_retaddr(ra);
958ed4cfbcdSRichard Henderson     return ret;
959ed4cfbcdSRichard Henderson }
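/*
 * A minimal sketch of the contract enforced here (this is the shape of
 * every helper below; nothing new is defined):
 *
 *     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, type);
 *     ... raw host access through haddr ...
 *     clear_helper_retaddr();
 *
 * While helper_retaddr == ra, a host SIGSEGV/SIGBUS on that access is
 * attributed to the guest via the unwind info at ra (see
 * adjust_signal_pc above) rather than being treated as a QEMU bug.
 */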
960ed4cfbcdSRichard Henderson 
961cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc"
962cdfac37bSRichard Henderson 
963e20f73fbSAnton Johansson static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
964e20f73fbSAnton Johansson                           uintptr_t ra, MMUAccessType access_type)
965ed4cfbcdSRichard Henderson {
966f83bcecbSRichard Henderson     void *haddr;
967f83bcecbSRichard Henderson     uint8_t ret;
968ed4cfbcdSRichard Henderson 
969f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
970e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
971f83bcecbSRichard Henderson     ret = ldub_p(haddr);
972f83bcecbSRichard Henderson     clear_helper_retaddr();
973de95016dSRichard Henderson     return ret;
974de95016dSRichard Henderson }
975de95016dSRichard Henderson 
976e20f73fbSAnton Johansson static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
977e20f73fbSAnton Johansson                            uintptr_t ra, MMUAccessType access_type)
978de95016dSRichard Henderson {
979de95016dSRichard Henderson     void *haddr;
980de95016dSRichard Henderson     uint16_t ret;
981e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
982de95016dSRichard Henderson 
983f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
984e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
985e20f73fbSAnton Johansson     ret = load_atom_2(cpu, ra, haddr, mop);
986de95016dSRichard Henderson     clear_helper_retaddr();
987fbea7a40SRichard Henderson 
988fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
989fbea7a40SRichard Henderson         ret = bswap16(ret);
990fbea7a40SRichard Henderson     }
991de95016dSRichard Henderson     return ret;
992de95016dSRichard Henderson }
993de95016dSRichard Henderson 
994e20f73fbSAnton Johansson static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
995e20f73fbSAnton Johansson                            uintptr_t ra, MMUAccessType access_type)
996de95016dSRichard Henderson {
997de95016dSRichard Henderson     void *haddr;
998de95016dSRichard Henderson     uint32_t ret;
999e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1000de95016dSRichard Henderson 
1001f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
1002e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
1003e20f73fbSAnton Johansson     ret = load_atom_4(cpu, ra, haddr, mop);
1004de95016dSRichard Henderson     clear_helper_retaddr();
1005fbea7a40SRichard Henderson 
1006fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
1007fbea7a40SRichard Henderson         ret = bswap32(ret);
1008fbea7a40SRichard Henderson     }
1009de95016dSRichard Henderson     return ret;
1010de95016dSRichard Henderson }
1011de95016dSRichard Henderson 
1012e20f73fbSAnton Johansson static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
1013e20f73fbSAnton Johansson                            uintptr_t ra, MMUAccessType access_type)
1014de95016dSRichard Henderson {
1015de95016dSRichard Henderson     void *haddr;
1016de95016dSRichard Henderson     uint64_t ret;
1017e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1018de95016dSRichard Henderson 
1019f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
1020e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
1021e20f73fbSAnton Johansson     ret = load_atom_8(cpu, ra, haddr, mop);
1022de95016dSRichard Henderson     clear_helper_retaddr();
1023de95016dSRichard Henderson 
1024de95016dSRichard Henderson     if (mop & MO_BSWAP) {
1025de95016dSRichard Henderson         ret = bswap64(ret);
1026de95016dSRichard Henderson     }
1027de95016dSRichard Henderson     return ret;
1028de95016dSRichard Henderson }
1029de95016dSRichard Henderson 
1030e20f73fbSAnton Johansson static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
1031e20f73fbSAnton Johansson                           MemOpIdx oi, uintptr_t ra)
1032cb48f365SRichard Henderson {
1033cb48f365SRichard Henderson     void *haddr;
1034cb48f365SRichard Henderson     Int128 ret;
1035e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1036cb48f365SRichard Henderson 
103735c653c4SRichard Henderson     tcg_debug_assert((mop & MO_SIZE) == MO_128);
1038f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
1039e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
1040e20f73fbSAnton Johansson     ret = load_atom_16(cpu, ra, haddr, mop);
1041cb48f365SRichard Henderson     clear_helper_retaddr();
104235c653c4SRichard Henderson 
104335c653c4SRichard Henderson     if (mop & MO_BSWAP) {
104435c653c4SRichard Henderson         ret = bswap128(ret);
104535c653c4SRichard Henderson     }
104635c653c4SRichard Henderson     return ret;
104735c653c4SRichard Henderson }
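/*
 * Note on the MO_BSWAP pattern above: load_atom_{2,4,8,16} return the
 * value in host byte order, so a swap is applied only when the guest
 * endianness in mop differs from the host's.  Illustrative call,
 * assuming a little-endian host and mmu_idx 0:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, 0);
 *     uint32_t x = do_ld4_mmu(cpu, addr, oi, ra, MMU_DATA_LOAD);
 *     // MO_BE implies MO_BSWAP on this host, so bswap32 is applied
 */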
104835c653c4SRichard Henderson 
1049e20f73fbSAnton Johansson static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
1050e20f73fbSAnton Johansson                        MemOpIdx oi, uintptr_t ra)
1051ed4cfbcdSRichard Henderson {
1052f83bcecbSRichard Henderson     void *haddr;
1053ed4cfbcdSRichard Henderson 
1054f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
1055e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
1056f83bcecbSRichard Henderson     stb_p(haddr, val);
1057ed4cfbcdSRichard Henderson     clear_helper_retaddr();
1058de95016dSRichard Henderson }
1059de95016dSRichard Henderson 
1060e20f73fbSAnton Johansson static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
1061e20f73fbSAnton Johansson                        MemOpIdx oi, uintptr_t ra)
1062de95016dSRichard Henderson {
1063de95016dSRichard Henderson     void *haddr;
1064e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1065de95016dSRichard Henderson 
1066f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
1067e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
1068fbea7a40SRichard Henderson 
1069fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
1070fbea7a40SRichard Henderson         val = bswap16(val);
1071fbea7a40SRichard Henderson     }
1072e20f73fbSAnton Johansson     store_atom_2(cpu, ra, haddr, mop, val);
1073de95016dSRichard Henderson     clear_helper_retaddr();
1074de95016dSRichard Henderson }
1075de95016dSRichard Henderson 
1076e20f73fbSAnton Johansson static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
1077e20f73fbSAnton Johansson                        MemOpIdx oi, uintptr_t ra)
1078de95016dSRichard Henderson {
1079f83bcecbSRichard Henderson     void *haddr;
1080e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1081b9e60257SRichard Henderson 
1082f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
1083e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
1084fbea7a40SRichard Henderson 
1085fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
1086fbea7a40SRichard Henderson         val = bswap32(val);
1087fbea7a40SRichard Henderson     }
1088e20f73fbSAnton Johansson     store_atom_4(cpu, ra, haddr, mop, val);
1089b9e60257SRichard Henderson     clear_helper_retaddr();
1090de95016dSRichard Henderson }
1091de95016dSRichard Henderson 
1092e20f73fbSAnton Johansson static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
1093e20f73fbSAnton Johansson                        MemOpIdx oi, uintptr_t ra)
1094de95016dSRichard Henderson {
1095f83bcecbSRichard Henderson     void *haddr;
1096e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1097b9e60257SRichard Henderson 
1098f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
1099e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
1100fbea7a40SRichard Henderson 
1101fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
1102fbea7a40SRichard Henderson         val = bswap64(val);
1103fbea7a40SRichard Henderson     }
1104e20f73fbSAnton Johansson     store_atom_8(cpu, ra, haddr, mop, val);
1105b9e60257SRichard Henderson     clear_helper_retaddr();
1106de95016dSRichard Henderson }
1107de95016dSRichard Henderson 
1108e20f73fbSAnton Johansson static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
1109e20f73fbSAnton Johansson                         MemOpIdx oi, uintptr_t ra)
1110cb48f365SRichard Henderson {
1111cb48f365SRichard Henderson     void *haddr;
1112e20f73fbSAnton Johansson     MemOp mop = get_memop(oi);
1113cb48f365SRichard Henderson 
1114f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
1115e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
1116fbea7a40SRichard Henderson 
1117fbea7a40SRichard Henderson     if (mop & MO_BSWAP) {
1118fbea7a40SRichard Henderson         val = bswap128(val);
1119fbea7a40SRichard Henderson     }
1120e20f73fbSAnton Johansson     store_atom_16(cpu, ra, haddr, mop, val);
112135c653c4SRichard Henderson     clear_helper_retaddr();
112235c653c4SRichard Henderson }
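/*
 * The stores mirror the loads, with the swap on the other side: val is
 * converted to host byte order *before* store_atom_{2,4,8,16}, so the
 * atomicity routines in ldst_atomicity.c.inc only ever see host-endian
 * data.
 */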
112335c653c4SRichard Henderson 
1124ed4cfbcdSRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1125ed4cfbcdSRichard Henderson {
1126ed4cfbcdSRichard Henderson     uint32_t ret;
1127ed4cfbcdSRichard Henderson 
1128ed4cfbcdSRichard Henderson     set_helper_retaddr(1);
11293e8f1628SRichard Henderson     ret = ldub_p(g2h_untagged(ptr));
1130ed4cfbcdSRichard Henderson     clear_helper_retaddr();
1131ed4cfbcdSRichard Henderson     return ret;
1132ed4cfbcdSRichard Henderson }
1133ed4cfbcdSRichard Henderson 
1134ed4cfbcdSRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
1135ed4cfbcdSRichard Henderson {
1136ed4cfbcdSRichard Henderson     uint32_t ret;
1137ed4cfbcdSRichard Henderson 
1138ed4cfbcdSRichard Henderson     set_helper_retaddr(1);
11393e8f1628SRichard Henderson     ret = lduw_p(g2h_untagged(ptr));
1140ed4cfbcdSRichard Henderson     clear_helper_retaddr();
1141ed4cfbcdSRichard Henderson     return ret;
1142ed4cfbcdSRichard Henderson }
1143ed4cfbcdSRichard Henderson 
1144ed4cfbcdSRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
1145ed4cfbcdSRichard Henderson {
1146ed4cfbcdSRichard Henderson     uint32_t ret;
1147ed4cfbcdSRichard Henderson 
1148ed4cfbcdSRichard Henderson     set_helper_retaddr(1);
11493e8f1628SRichard Henderson     ret = ldl_p(g2h_untagged(ptr));
1150ed4cfbcdSRichard Henderson     clear_helper_retaddr();
1151ed4cfbcdSRichard Henderson     return ret;
1152ed4cfbcdSRichard Henderson }
1153ed4cfbcdSRichard Henderson 
1154ed4cfbcdSRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1155ed4cfbcdSRichard Henderson {
1156ed4cfbcdSRichard Henderson     uint64_t ret;
1157ed4cfbcdSRichard Henderson 
1158ed4cfbcdSRichard Henderson     set_helper_retaddr(1);
11593e8f1628SRichard Henderson     ret = ldq_p(g2h_untagged(ptr));
1160ed4cfbcdSRichard Henderson     clear_helper_retaddr();
1161ed4cfbcdSRichard Henderson     return ret;
1162ed4cfbcdSRichard Henderson }
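/*
 * On the sentinel above: set_helper_retaddr(1) is neither 0 ("fault in
 * generated code") nor a real host return address.  adjust_signal_pc
 * treats helper_retaddr == 1 as a fault during translation-time
 * instruction fetch, where the guest pc already points at the faulting
 * insn and no unwinding is needed; sketched handling (paraphrased,
 * assuming the case labels match adjust_signal_pc):
 *
 *     case 1:
 *         return MMU_INST_FETCH;
 */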
1163ed4cfbcdSRichard Henderson 
116428990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
116528990626SRichard Henderson                          MemOpIdx oi, uintptr_t ra)
116628990626SRichard Henderson {
116728990626SRichard Henderson     void *haddr;
116828990626SRichard Henderson     uint8_t ret;
116928990626SRichard Henderson 
1170e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(env_cpu(env), addr, get_memop(oi), ra, MMU_INST_FETCH);
117128990626SRichard Henderson     ret = ldub_p(haddr);
117228990626SRichard Henderson     clear_helper_retaddr();
117328990626SRichard Henderson     return ret;
117428990626SRichard Henderson }
117528990626SRichard Henderson 
117628990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
117728990626SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
117828990626SRichard Henderson {
117928990626SRichard Henderson     void *haddr;
118028990626SRichard Henderson     uint16_t ret;
118128990626SRichard Henderson 
1182e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(env_cpu(env), addr, get_memop(oi), ra, MMU_INST_FETCH);
118328990626SRichard Henderson     ret = lduw_p(haddr);
118428990626SRichard Henderson     clear_helper_retaddr();
118528990626SRichard Henderson     if (get_memop(oi) & MO_BSWAP) {
118628990626SRichard Henderson         ret = bswap16(ret);
118728990626SRichard Henderson     }
118828990626SRichard Henderson     return ret;
118928990626SRichard Henderson }
119028990626SRichard Henderson 
119128990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
119228990626SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
119328990626SRichard Henderson {
119428990626SRichard Henderson     void *haddr;
119528990626SRichard Henderson     uint32_t ret;
119628990626SRichard Henderson 
1197e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(env_cpu(env), addr, get_memop(oi), ra, MMU_INST_FETCH);
119828990626SRichard Henderson     ret = ldl_p(haddr);
119928990626SRichard Henderson     clear_helper_retaddr();
120028990626SRichard Henderson     if (get_memop(oi) & MO_BSWAP) {
120128990626SRichard Henderson         ret = bswap32(ret);
120228990626SRichard Henderson     }
120328990626SRichard Henderson     return ret;
120428990626SRichard Henderson }
120528990626SRichard Henderson 
120628990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
120728990626SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
120828990626SRichard Henderson {
120928990626SRichard Henderson     void *haddr;
121028990626SRichard Henderson     uint64_t ret;
121128990626SRichard Henderson 
1212e20f73fbSAnton Johansson     haddr = cpu_mmu_lookup(env_cpu(env), addr, get_memop(oi), ra, MMU_INST_FETCH);
121328990626SRichard Henderson     ret = ldq_p(haddr);
121428990626SRichard Henderson     clear_helper_retaddr();
121528990626SRichard Henderson     if (get_memop(oi) & MO_BSWAP) {
121628990626SRichard Henderson         ret = bswap64(ret);
121728990626SRichard Henderson     }
121828990626SRichard Henderson     return ret;
121928990626SRichard Henderson }
122028990626SRichard Henderson 
1221f83bcecbSRichard Henderson #include "ldst_common.c.inc"
1222f83bcecbSRichard Henderson 
1223a754f7f3SRichard Henderson /*
1224a754f7f3SRichard Henderson  * Do not allow unaligned operations to proceed.  Return the host address.
1225a754f7f3SRichard Henderson  */
1226d560225fSAnton Johansson static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1227b0326eb9SAnton Johansson                                int size, uintptr_t retaddr)
1228a411d296SPhilippe Mathieu-Daudé {
1229fce3f474SRichard Henderson     MemOp mop = get_memop(oi);
1230fce3f474SRichard Henderson     int a_bits = get_alignment_bits(mop);
1231fce3f474SRichard Henderson     void *ret;
1232fce3f474SRichard Henderson 
1233fce3f474SRichard Henderson     /* Enforce guest required alignment.  */
1234fce3f474SRichard Henderson     if (unlikely(addr & ((1 << a_bits) - 1))) {
1235d560225fSAnton Johansson         cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
1236fce3f474SRichard Henderson     }
1237fce3f474SRichard Henderson 
1238a411d296SPhilippe Mathieu-Daudé     /* Enforce qemu required alignment.  */
1239a411d296SPhilippe Mathieu-Daudé     if (unlikely(addr & (size - 1))) {
1240d560225fSAnton Johansson         cpu_loop_exit_atomic(cpu, retaddr);
1241a411d296SPhilippe Mathieu-Daudé     }
1242fce3f474SRichard Henderson 
1243d560225fSAnton Johansson     ret = g2h(cpu, addr);
124408b97f7fSRichard Henderson     set_helper_retaddr(retaddr);
124508b97f7fSRichard Henderson     return ret;
1246a411d296SPhilippe Mathieu-Daudé }
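/*
 * Illustrative failure modes for the two checks above: a 4-byte atomic
 * with guest alignment MO_ALIGN_2 at an address ending in binary ...10
 * passes the guest check (a_bits == 1) but trips the qemu check
 * (addr & 3), raising EXCP_ATOMIC via cpu_loop_exit_atomic so the
 * operation is retried single-stepped under the exclusive lock.
 */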
1247a411d296SPhilippe Mathieu-Daudé 
1248be9568b4SRichard Henderson #include "atomic_common.c.inc"
1249be9568b4SRichard Henderson 
1250be9568b4SRichard Henderson /*
1251be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
1252be9568b4SRichard Henderson  * This makes them callable from other helpers.
1253be9568b4SRichard Henderson  */
1254be9568b4SRichard Henderson 
1255be9568b4SRichard Henderson #define ATOMIC_NAME(X) \
1256be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
125708b97f7fSRichard Henderson #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
1258a411d296SPhilippe Mathieu-Daudé 
1259a411d296SPhilippe Mathieu-Daudé #define DATA_SIZE 1
1260a411d296SPhilippe Mathieu-Daudé #include "atomic_template.h"
1261a411d296SPhilippe Mathieu-Daudé 
1262a411d296SPhilippe Mathieu-Daudé #define DATA_SIZE 2
1263a411d296SPhilippe Mathieu-Daudé #include "atomic_template.h"
1264a411d296SPhilippe Mathieu-Daudé 
1265a411d296SPhilippe Mathieu-Daudé #define DATA_SIZE 4
1266a411d296SPhilippe Mathieu-Daudé #include "atomic_template.h"
1267a411d296SPhilippe Mathieu-Daudé 
1268a411d296SPhilippe Mathieu-Daudé #ifdef CONFIG_ATOMIC64
1269a411d296SPhilippe Mathieu-Daudé #define DATA_SIZE 8
1270a411d296SPhilippe Mathieu-Daudé #include "atomic_template.h"
1271a411d296SPhilippe Mathieu-Daudé #endif
1272a411d296SPhilippe Mathieu-Daudé 
127376f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
1274be9568b4SRichard Henderson #define DATA_SIZE 16
1275be9568b4SRichard Henderson #include "atomic_template.h"
1276be9568b4SRichard Henderson #endif
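/*
 * For reference, each atomic_template.h inclusion above expands to the
 * cpu_atomic_*_mmu family for that size; e.g. DATA_SIZE == 4 yields
 * names such as cpu_atomic_cmpxchgl_le_mmu and
 * cpu_atomic_fetch_addl_le_mmu, each bracketed by atomic_mmu_lookup()
 * and ATOMIC_MMU_CLEANUP.
 */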
1277