/*
 * TranslationBlock internal declarations (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
#define ACCEL_TCG_TB_INTERNAL_TARGET_H

#include "exec/cpu-all.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn.  Adjust the address backward to point to
 * the middle of the call insn.  Subtracting one would do the job except for
 * several compressed mode architectures (arm, mips) which set the low bit
 * to indicate the compressed mode; subtracting two works around that.  It
 * is also the case that there are no host isas that contain a call insn
 * smaller than 4 bytes, so we don't worry about special-casing this.
 */
#define GETPC_ADJ   2

#ifdef CONFIG_SOFTMMU

#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8

# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
#  define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
 * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
 * 2**34 == 16G of address space.  This is roughly what one would expect a
 * TLB to cover in a modern (as of 2018) x86_64 CPU.  For instance, Intel
 * Skylake's Level-2 STLB has 16 1G entries.
 * Also, make sure we do not size the TLB past the guest's address space.
 */
#  ifdef TARGET_PAGE_BITS_VARY
#   define CPU_TLB_DYN_MAX_BITS                                  \
        MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  else
#   define CPU_TLB_DYN_MAX_BITS                                  \
        MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  endif
# endif

#endif /* CONFIG_SOFTMMU */

void tb_lock_page0(tb_page_addr_t);

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    tb_lock_page0(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

#endif
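
/*
 * Illustrative sketch of the GETPC_ADJ adjustment described above; not part
 * of the header proper.  The guard macro TB_INTERNAL_EXAMPLES and the helper
 * example_adjust_retaddr() are hypothetical names for illustration only; the
 * real unwind machinery applies the same subtraction before mapping a host
 * return address back to a guest insn.
 */
#ifdef TB_INTERNAL_EXAMPLES
#include <stdint.h>

static inline uintptr_t example_adjust_retaddr(uintptr_t retaddr)
{
    /*
     * Subtract GETPC_ADJ (2) so the result lands inside the call insn
     * rather than at the start of the following host insn.  Subtracting
     * only 1 would misfire on hosts (arm, mips) that use the low address
     * bit to flag a compressed encoding; no host ISA has a call insn
     * shorter than 4 bytes, so subtracting 2 is always safe.
     */
    return retaddr - GETPC_ADJ;
}
#endif /* TB_INTERNAL_EXAMPLES */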
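
/*
 * Worked example of the CPU_TLB_DYN_MAX_BITS sizing arithmetic; illustration
 * only, behind the same hypothetical TB_INTERNAL_EXAMPLES guard.  Coverage is
 * (number of entries) * (page size) = 2**max_bits * 2**page_bits bytes.
 */
#ifdef TB_INTERNAL_EXAMPLES
#include <stdint.h>

static inline uint64_t example_tlb_max_coverage_bytes(unsigned page_bits,
                                                      unsigned max_bits)
{
    /* 2**(max_bits + page_bits) bytes of guest address space. */
    return (uint64_t)1 << (max_bits + page_bits);
}

/*
 * With TARGET_PAGE_BITS == 12 and the 22-bit cap:
 * example_tlb_max_coverage_bytes(12, 22) == 1ULL << 34 == 16 GiB,
 * matching the comment above.
 */
#endif /* TB_INTERNAL_EXAMPLES */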
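
/*
 * Sketch of the expected lock pairing for a TB that may span two guest
 * pages; illustration only.  example_lock_tb_pages() is a hypothetical
 * caller, and treating -1 as the "no second page" sentinel is an assumption
 * drawn from typical usage.  On user-only builds the tb_unlock_* side
 * compiles to nothing, matching the comment above: page protection is only
 * undone when the write fault arrives.
 */
#ifdef TB_INTERNAL_EXAMPLES
static inline void example_lock_tb_pages(tb_page_addr_t p0, tb_page_addr_t p1)
{
    /* The first page is always locked. */
    tb_lock_page0(p0);
    if (p1 != -1) {
        /* A second page exists only when the TB crosses a page boundary. */
        tb_lock_page1(p0, p1);
    }
}
#endif /* TB_INTERNAL_EXAMPLES */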