/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Definition of TranslationBlock.
 * Copyright (c) 2003 Fabrice Bellard
 */

#ifndef EXEC_TRANSLATION_BLOCK_H
#define EXEC_TRANSLATION_BLOCK_H

#include "qemu/thread.h"
#include "exec/cpu-common.h"
#include "exec/vaddr.h"
#ifdef CONFIG_USER_ONLY
#include "qemu/interval-tree.h"
#endif

/*
 * Page tracking code uses ram addresses in system mode, and virtual
 * addresses in userspace mode.  Define tb_page_addr_t to be an
 * appropriate type.
 */
#if defined(CONFIG_USER_ONLY)
typedef vaddr tb_page_addr_t;
#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of the search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    /*
     * Guest PC corresponding to this block.  This must be the true
     * virtual address.  Therefore e.g. x86 stores EIP + CS_BASE, and
     * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
     * privilege, must store those bits elsewhere.
     *
     * If CF_PCREL, the opcodes for the TranslationBlock are written
     * such that the TB is associated only with the physical page and
     * may be run in any virtual address context.  In this case, PC
     * must always be taken from ENV in a target-specific manner.
     * Unwind information is taken as offsets from the page, to be
     * deposited into the "current" PC.
     */
    vaddr pc;

    /*
     * Target-specific data associated with the TranslationBlock, e.g.:
     * x86: the original user, the Code Segment virtual base,
     * arm: an extension of tb->flags,
     * s390x: instruction data for EXECUTE,
     * sparc: the next pc of the instruction queue (for delay slots).
     */
    uint64_t cs_base;

    uint32_t flags;  /* flags defining in which context the code was generated */
    uint32_t cflags; /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_MEMI_ONLY     0x00001000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00002000
#define CF_INVALID       0x00004000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00008000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00010000 /* Generate an uninterruptible TB */
#define CF_PCREL         0x00020000 /* Opcodes in TB are PC-relative */
#define CF_BP_PAGE       0x00040000 /* Breakpoint present in code page */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /*
     * The fields above are used for comparing TBs.
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /*
     * Track tb_page_addr_t intervals that intersect this TB.
     * For user-only, the virtual addresses are always contiguous,
     * and we use a unified interval tree.  For system, we use a
     * linked list headed in each PageDesc.  Within the list, the lsb
     * of the previous pointer tells the index of page_next[], and the
     * list is protected by the PageDesc lock(s).
     */
#ifdef CONFIG_USER_ONLY
    IntervalTreeNode itree;
#else
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
#endif

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one.  This can be done either by emitting direct or
     * indirect native jump instructions.  These jumps are reset so that the TB
     * just continues its execution.  The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction).
     * Only two such jumps are supported.
     */
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
    uint16_t jmp_insn_offset[2];  /* offset of direct jump insn */
    uintptr_t jmp_target_addr[2]; /* target address */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists.  The list entries are kept in jmp_list_next[2].  The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed-to TB.
     *
     * List traversals are protected by jmp_lock.  The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well.  The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* The alignment given to TranslationBlock during allocation. */
#define CODE_GEN_ALIGN 16

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

#endif /* EXEC_TRANSLATION_BLOCK_H */
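
/*
 * Illustrative sketch, not part of this header: how the fields that the
 * CF_* definitions above pack into tb->cflags are decoded, using only
 * tb_cflags() and the mask/shift pairs defined here.  The helper name
 * below is hypothetical and exists purely for illustration.
 *
 *     static int tb_cluster_id_example(const TranslationBlock *tb)
 *     {
 *         uint32_t cflags = tb_cflags(tb);
 *
 *         // CF_INVALID marks a stale TB; it is set under jmp_lock and
 *         // means the TB must not be chained to or reused.
 *         if (cflags & CF_INVALID) {
 *             return -1;
 *         }
 *
 *         // The top 8 bits carry the ID of the CPU cluster the TB
 *         // was generated for.
 *         return (cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 *     }
 *
 * The instruction-count field is recovered analogously from the low bits:
 *
 *     unsigned insn_count = cflags & CF_COUNT_MASK;   // 0..511
 */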