/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
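/*
 * Illustrative sketch (an assumption about the caller, not code in this
 * file): the host SIGSEGV handler in linux-user is expected to combine
 * the two functions above roughly as:
 *
 *     uintptr_t pc = ...;    // host pc taken from the signal frame
 *     MMUAccessType t = adjust_signal_pc(&pc, is_write);
 *     if (t == MMU_DATA_STORE && !is_maperr
 *         && handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
 *         return;            // page unprotected; retry the access
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, t, is_maperr, pc);
 *
 * is_write and is_maperr stand for host-specific decoding of the signal.
 */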
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}
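/*
 * Illustrative example (an assumption about callers, not code in this
 * file): a caller testing whether a guest address is currently
 * guest-writable might do:
 *
 *     int flags = page_get_flags(guest_addr);
 *     if ((flags & (PAGE_VALID | PAGE_WRITE)) == (PAGE_VALID | PAGE_WRITE)) {
 *         ... proceed with the write ...
 *     }
 *
 * Note that PAGE_WRITE may be temporarily clear for pages that contain
 * cached translations; see page_protect() and page_unprotect() below.
 */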
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
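/*
 * Worked example (illustrative): suppose the tree holds one node
 * [0x1000, 0x1fff] with rw flags, and pageflags_create_merge() is called
 * for [0x2000, 0x2fff] with the same rw flags.  The lookup at 0x1fff
 * finds the existing node, which is extended in place to cover
 * [0x1000, 0x2fff] rather than inserting a second node.  Had the flags
 * differed, the existing node would be left alone and a new node created,
 * so the two ranges would remain separate.
 */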
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/* A subroutine of page_set_flags: set and clear flags in [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
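/*
 * Worked example (illustrative): with a single node [0x0000, 0x3fff]
 * holding r-x flags, changing the middle pages [0x1000, 0x1fff] to rw
 * (as a guest mprotect would) splits the range into three nodes:
 * [0x0000, 0x0fff] r-x, [0x1000, 0x1fff] rw-, and [0x2000, 0x3fff] r-x.
 * The function returns true, because an executable range lost exec and
 * gained write, so any cached translations for it must be invalidated.
 */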
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong last;
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start < end);
    assert(end - 1 <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    last = end - 1;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, end);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, end);
    }
}
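/*
 * Illustrative example (an assumption about the caller, not code in this
 * file): the user-mode mmap emulation is expected to record a fresh
 * anonymous mapping along the lines of:
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_RESET | PAGE_READ | PAGE_WRITE);
 *
 * PAGE_RESET marks the range as a new mapping, so sticky bits such as
 * PAGE_ANON from any previous mapping are discarded rather than merged.
 */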
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1;  /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1;  /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1;  /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1;  /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0;  /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0;  /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
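/*
 * Illustrative example (an assumption about callers, not code in this
 * file): syscall emulation is expected to validate guest buffers with
 * something like:
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * The PAGE_WRITE check transparently undoes any write-protection that
 * page_protect() installed to guard cached translations.
 */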
/*
 * Called from signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited.  (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
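/*
 * Illustrative example (an assumption about callers, not code in this
 * file): a target helper that must not fault while testing an address
 * might probe it as:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
 *                                    cpu_mmu_index(env, false),
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... page not accessible; take a target-specific slow path ...
 *     }
 *
 * With nonfault=false the probe either succeeds or raises SIGSEGV for
 * the guest via cpu_loop_exit_sigsegv() and does not return.
 */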
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong end)
{
    IntervalTreeNode *n, *next;
    target_ulong last;

    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    last = TARGET_PAGE_ALIGN(end) - 1;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c. */
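/*
 * Illustrative example (assumption, not code in this file): a target
 * that defines TARGET_PAGE_DATA_SIZE, such as Arm for MTE allocation
 * tags, is expected to fetch its per-page storage with:
 *
 *     void *tags = page_get_target_data(guest_addr);
 *
 * The returned buffer is TARGET_PAGE_DATA_SIZE bytes, zero-initialized
 * on first use, and cleared again by page_reset_target_data() when the
 * page is remapped.
 */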
/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
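/*
 * Illustrative example (an assumption about callers, not code in this
 * file): a target helper performing a big-endian 32-bit guest load would
 * be expected to call:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * Each helper below validates that the MemOp in @oi matches the access
 * size and byte order implied by the function name.
 */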
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (!HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}
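/*
 * Editorial note on the pattern above and below: cpu_mmu_lookup() sets
 * helper_retaddr before the host access and clear_helper_retaddr()
 * resets it afterwards, so that a SIGSEGV raised by the host load or
 * store is attributed to the helper (see adjust_signal_pc() above) and
 * unwinds to the correct guest pc.
 */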
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
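
/*
 * Code loads, used by the translator: set_helper_retaddr(1) is the
 * sentinel that tags a fault in this window as an instruction fetch
 * rather than a data access.  No plugin callback is issued, since these
 * are fetches performed on behalf of translation, not guest data
 * accesses.
 */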
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
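
/*
 * Usage sketch (illustrative only; names and values hypothetical): a
 * target helper packages the access size, endianness and mmu index into
 * a MemOpIdx, and passes its own return address so that a fault unwinds
 * to the correct guest pc:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
 *     uint32_t v = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *     cpu_stl_be_mmu(env, addr, v + 1, oi, GETPC());
 *
 * Most callers instead use the convenience wrappers generated from
 * "ldst_common.c.inc" below, which construct the MemOpIdx themselves.
 */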
#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
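
/*
 * Each inclusion of "atomic_template.h" above expands, via ATOMIC_NAME,
 * into the cpu_atomic_*_mmu entry points for one operand size, e.g.
 * cpu_atomic_cmpxchgl_le_mmu for DATA_SIZE 4.  Every expansion brackets
 * the host operation with atomic_mmu_lookup() and ATOMIC_MMU_CLEANUP,
 * so helper_retaddr is held across the access just as for the ordinary
 * loads and stores above.
 */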