/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
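
/*
 * Illustrative sketch (not part of this file): a host SIGSEGV handler
 * is expected to combine the two helpers above roughly as follows,
 * once it has extracted the host pc and fault address from the
 * ucontext.  The variable names here are hypothetical; the real
 * dispatch lives in the per-host signal handling code.
 *
 *     MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);
 *     if (is_write
 *         && info->si_code == SEGV_ACCERR
 *         && handle_sigsegv_accerr_write(cpu, old_set,
 *                                        host_pc, guest_addr)) {
 *         return;  // page unprotected; retry the faulting access
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type,
 *                           maperr, host_pc);
 */
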
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}
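
/*
 * Example (illustrative only): a caller asking whether a guest page is
 * executable before translating from it might do
 *
 *     if (!(page_get_flags(guest_pc) & PAGE_EXEC)) {
 *         ... deliver SIGSEGV to the guest ...
 *     }
 *
 * page_get_flags() itself compensates for the false negatives of the
 * lockless lookup by retrying under the lock, but the answer is only
 * stable while the mmap_lock is held, or while no other thread can
 * remap the range in question.
 */
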
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY 0
#endif
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
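
/*
 * Worked example (illustrative): an mprotect() from the guest that
 * drops write permission on an existing anonymous mapping arrives at
 * pageflags_set_clear() via page_set_flags() below with roughly
 *
 *     set_flags   = PAGE_VALID | PAGE_READ
 *     clear_flags = ~PAGE_STICKY
 *
 * so merge_flags keeps the sticky PAGE_ANON from the existing node
 * while PAGE_WRITE is dropped.  If the old node also had PAGE_EXEC,
 * losing exec (or gaining write) sets inval_tb and forces a TB flush
 * for the range.
 */
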
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last + 1);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last + 1);
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1;  /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1;  /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1;  /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1;  /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0;  /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0;  /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
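
/*
 * Example (illustrative): validating a guest buffer before touching it
 * directly through the host mapping, e.g. in a syscall helper:
 *
 *     if (page_check_range(guest_buf, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;  // hypothetical error path
 *     }
 *     memcpy(g2h(env_cpu(env), guest_buf), data, len);
 *
 * Note that the PAGE_WRITE check may itself call page_unprotect() to
 * undo SMC write protection on the pages of the buffer.
 */
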
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited.  (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES 64
#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong end)
{
    IntervalTreeNode *n, *next;
    target_ulong last;

    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    last = TARGET_PAGE_ALIGN(end) - 1;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */
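
/*
 * Worked example for the chunking above (illustrative, assuming 4KiB
 * target pages): TBD_MASK then covers a 256KiB-aligned region of 64
 * pages, so an access to address 0x12345678 computes
 *
 *     page   = 0x12345000
 *     region = 0x12340000
 *     index  = (page - region) >> TARGET_PAGE_BITS = 5
 *
 * and all 64 pages of the region share a single TargetPageDataNode.
 */
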
/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
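
/*
 * All of the load/store helpers below follow the same pattern
 * (shown here for an illustrative big-endian 32-bit load):
 *
 *     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
 *     ret = ldl_be_p(haddr);   // direct host access; may fault
 *     clear_helper_retaddr();  // fault window closed
 *     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
 *
 * cpu_mmu_lookup() leaves helper_retaddr set, so a SIGSEGV raised by the
 * host access unwinds to the correct guest pc via adjust_signal_pc() above.
 */
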
1026f83bcecbSRichard Henderson haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); 1027f83bcecbSRichard Henderson ret = ldq_le_p(haddr); 1028f83bcecbSRichard Henderson clear_helper_retaddr(); 1029f83bcecbSRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 1030ed4cfbcdSRichard Henderson return ret; 1031ed4cfbcdSRichard Henderson } 1032ed4cfbcdSRichard Henderson 1033cb48f365SRichard Henderson Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, 1034cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 1035cb48f365SRichard Henderson { 1036cb48f365SRichard Henderson void *haddr; 1037cb48f365SRichard Henderson Int128 ret; 1038cb48f365SRichard Henderson 1039cb48f365SRichard Henderson validate_memop(oi, MO_128 | MO_BE); 1040cb48f365SRichard Henderson haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); 1041cb48f365SRichard Henderson memcpy(&ret, haddr, 16); 1042cb48f365SRichard Henderson clear_helper_retaddr(); 1043cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 1044cb48f365SRichard Henderson 1045cb48f365SRichard Henderson if (!HOST_BIG_ENDIAN) { 1046cb48f365SRichard Henderson ret = bswap128(ret); 1047cb48f365SRichard Henderson } 1048cb48f365SRichard Henderson return ret; 1049cb48f365SRichard Henderson } 1050cb48f365SRichard Henderson 1051cb48f365SRichard Henderson Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, 1052cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 1053cb48f365SRichard Henderson { 1054cb48f365SRichard Henderson void *haddr; 1055cb48f365SRichard Henderson Int128 ret; 1056cb48f365SRichard Henderson 1057cb48f365SRichard Henderson validate_memop(oi, MO_128 | MO_LE); 1058cb48f365SRichard Henderson haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); 1059cb48f365SRichard Henderson memcpy(&ret, haddr, 16); 1060cb48f365SRichard Henderson clear_helper_retaddr(); 1061cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 1062cb48f365SRichard Henderson 1063cb48f365SRichard Henderson if (HOST_BIG_ENDIAN) { 1064cb48f365SRichard Henderson ret = bswap128(ret); 1065cb48f365SRichard Henderson } 1066cb48f365SRichard Henderson return ret; 1067cb48f365SRichard Henderson } 1068cb48f365SRichard Henderson 1069f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, 1070f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 1071ed4cfbcdSRichard Henderson { 1072f83bcecbSRichard Henderson void *haddr; 1073ed4cfbcdSRichard Henderson 1074f83bcecbSRichard Henderson validate_memop(oi, MO_UB); 1075f83bcecbSRichard Henderson haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); 1076f83bcecbSRichard Henderson stb_p(haddr, val); 1077ed4cfbcdSRichard Henderson clear_helper_retaddr(); 1078f83bcecbSRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 1079ed4cfbcdSRichard Henderson } 1080ed4cfbcdSRichard Henderson 1081f83bcecbSRichard Henderson void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, 1082f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 1083ed4cfbcdSRichard Henderson { 1084f83bcecbSRichard Henderson void *haddr; 1085ed4cfbcdSRichard Henderson 1086f83bcecbSRichard Henderson validate_memop(oi, MO_BEUW); 1087f83bcecbSRichard Henderson haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); 1088f83bcecbSRichard Henderson stw_be_p(haddr, val); 1089ed4cfbcdSRichard Henderson clear_helper_retaddr(); 1090f83bcecbSRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), 

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
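
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * target helper splitting a 64-bit store into two little-endian 32-bit
 * stores.  Reusing one return-address value means a SIGSEGV in either
 * store unwinds to the same guest instruction.
 */
static inline void example_store_pair(CPUArchState *env, abi_ptr addr,
                                      uint64_t val, MemOpIdx oi,
                                      uintptr_t ra)
{
    cpu_stl_le_mmu(env, addr, (uint32_t)val, oi, ra);
    cpu_stl_le_mmu(env, addr + 4, (uint32_t)(val >> 32), oi, ra);
}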

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
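
/*
 * Illustrative sketch, not part of the original file: a translator
 * for a hypothetical fixed-width 32-bit ISA fetching one instruction.
 * The set_helper_retaddr(1)/clear_helper_retaddr() pair inside the
 * cpu_*_code helpers above distinguishes a fault here from a fault on
 * a data access.
 */
static inline uint32_t example_fetch_insn(CPUArchState *env, abi_ptr pc)
{
    /* Guest pages are mapped directly, so this is a plain host load. */
    return cpu_ldl_code(env, pc);
}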

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
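
/*
 * Illustrative sketch, not part of the original file: for DATA_SIZE 4
 * the template expands ATOMIC_NAME(cmpxchg) to cpu_atomic_cmpxchgl_le_mmu
 * (plus a _be_ variant), which a hypothetical target helper could call:
 */
static inline uint32_t example_cas32(CPUArchState *env, abi_ptr addr,
                                     uint32_t cmpv, uint32_t newv,
                                     MemOpIdx oi, uintptr_t ra)
{
    /*
     * atomic_mmu_lookup() sets helper_retaddr for fault unwinding;
     * ATOMIC_MMU_CLEANUP clears it before the helper returns.
     */
    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}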