/*
 * TranslationBlock internal declarations (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
#define ACCEL_TCG_TB_INTERNAL_TARGET_H

#include "exec/translation-block.h"

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn.  Adjust the address backward to point to
 * the middle of the call insn.  Subtracting one would do the job except for
 * several compressed-mode architectures (arm, mips) which set the low bit
 * to indicate compressed mode; subtracting two works around that.  It is
 * also the case that no host ISA has a call insn smaller than 4 bytes,
 * so we don't worry about special-casing this.
 */
#define GETPC_ADJ   2

#ifdef CONFIG_SOFTMMU

#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8

# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
#  define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
 * Assuming TARGET_PAGE_BITS == 12, with 2**22 entries we can cover
 * 2**(22+12) == 2**34 == 16G of address space.  This is roughly what one
 * would expect a TLB to cover in a modern (as of 2018) x86_64 CPU.  For
 * instance, Intel Skylake's Level-2 STLB has sixteen 1G entries.
 * Also, make sure we do not size the TLB past the guest's address space.
 */
#  ifdef TARGET_PAGE_BITS_VARY
#   define CPU_TLB_DYN_MAX_BITS                                  \
        MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  else
#   define CPU_TLB_DYN_MAX_BITS                                  \
        MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  endif
# endif

#endif /* CONFIG_SOFTMMU */

void tb_lock_page0(tb_page_addr_t);

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page
 * protection until we receive the write fault.
 */
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    tb_lock_page0(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

#endif /* ACCEL_TCG_TB_INTERNAL_TARGET_H */
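
/*
 * Illustrative sketch of how GETPC_ADJ is consumed (not a definition in
 * this header, and the exact call site is assumed for illustration).  A
 * slow-path helper captures its raw host return address with GETPC();
 * the unwind code then steps the address back into the middle of the
 * call insn before looking up the covering TB.  GETPC() and
 * tcg_tb_lookup() are the usual TCG names:
 *
 *     uintptr_t host_pc = GETPC();             // raw host return address
 *     host_pc -= GETPC_ADJ;                    // point into the call insn
 *     TranslationBlock *tb = tcg_tb_lookup(host_pc);
 */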
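
/*
 * A worked example of the CPU_TLB_DYN_MAX_BITS arithmetic above, assuming
 * TARGET_PAGE_BITS == 12 (4K pages) on a 64-bit host; the numbers merely
 * restate the sizing comment rather than add new limits.
 *
 *     max entries  = 1 << 22                    -> 4M TLB entries
 *     max coverage = 2**22 * 2**12 == 2**34     -> 16G of guest memory
 *
 * The MIN() against TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS keeps
 * the table from indexing more pages than the guest can address at all.
 */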
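
/*
 * Illustrative pairing of the lock/unlock entry points above (a sketch,
 * not the actual call sites, which live in the TB maintenance code).  A
 * TB that crosses a page boundary locks both pages before it is linked;
 * using -1 to mark an absent second page is an assumption of this sketch.
 *
 *     tb_lock_page0(p0);
 *     if (p1 != -1) {
 *         tb_lock_page1(p0, p1);
 *     }
 *     // ... install and link the TB ...
 *     tb_unlock_pages(tb);
 *
 * Under CONFIG_USER_ONLY the unlock half is a no-op: page_protect made
 * the pages read-only, and protection is only undone on the write fault.
 */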