/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Definition of TranslationBlock.
 * Copyright (c) 2003 Fabrice Bellard
 */

#ifndef EXEC_TRANSLATION_BLOCK_H
#define EXEC_TRANSLATION_BLOCK_H

#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "exec/cpu-common.h"
#include "exec/vaddr.h"
#ifdef CONFIG_USER_ONLY
#include "qemu/interval-tree.h"
#include "exec/target_page.h"
#endif

/*
 * Page tracking code uses ram addresses in system mode, and virtual
 * addresses in userspace mode. Define tb_page_addr_t to be an
 * appropriate type.
 */
#if defined(CONFIG_USER_ONLY)
typedef vaddr tb_page_addr_t;
#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
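
/*
 * Usage sketch: TB_PAGE_ADDR_FMT matches tb_page_addr_t in either
 * configuration, so a call site can log addresses the same way in
 * user-only and system builds. Illustrative only; assumes "qemu/log.h"
 * is available at the call site:
 *
 *     qemu_log("tb page at " TB_PAGE_ADDR_FMT "\n", tb_page_addr0(tb));
 */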

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a
 * binary search tree, and the only fields needed to compare TBs in the
 * tree are @ptr and @size.
 * Note: the address of the search data can be obtained by adding @size
 * to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};
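
/*
 * Illustrative sketch, not part of the QEMU API: per the note above,
 * the search data lives immediately after the translated code. The
 * helper name below is hypothetical.
 */
static inline const void *tb_tc_search_data(const struct tb_tc *tc)
{
    /* The search data follows the translated code: @ptr advanced by @size. */
    return (const char *)tc->ptr + tc->size;
}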

struct TranslationBlock {
    /*
     * Guest PC corresponding to this block. This must be the true
     * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
     * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
     * privilege, must store those bits elsewhere.
     *
     * If CF_PCREL, the opcodes for the TranslationBlock are written
     * such that the TB is associated only with the physical page and
     * may be run in any virtual address context. In this case, PC
     * must always be taken from ENV in a target-specific manner.
     * Unwind information is taken as offsets from the page, to be
     * deposited into the "current" PC.
     */
    vaddr pc;

    /*
     * Target-specific data associated with the TranslationBlock, e.g.:
     * x86: the original user, the Code Segment virtual base,
     * arm: an extension of tb->flags,
     * s390x: instruction data for EXECUTE,
     * sparc: the next pc of the instruction queue (for delay slots).
     */
    uint64_t cs_base;

    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_MEMI_ONLY     0x00001000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00002000
#define CF_INVALID       0x00004000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00008000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00010000 /* Generate an uninterruptible TB */
#define CF_PCREL         0x00020000 /* Opcodes in TB are PC-relative */
#define CF_BP_PAGE       0x00040000 /* Breakpoint present in code page */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /*
     * The fields above are the ones used when comparing TBs.
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /*
     * Track tb_page_addr_t intervals that intersect this TB.
     * For user-only, the virtual addresses are always contiguous,
     * and we use a unified interval tree. For system, we use a
     * linked list headed in each PageDesc. Within the list, the lsb
     * of the previous pointer tells the index of page_next[], and the
     * list is protected by the PageDesc lock(s).
     */
#ifdef CONFIG_USER_ONLY
    IntervalTreeNode itree;
#else
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
#endif

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;
    /*
     * The following data are used to directly call another TB from the
     * code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that
     * the TB just continues its execution. The TB can be linked to
     * another one by setting one of the jump targets (or patching the
     * jump instruction). Only two such jumps are supported.
     */
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
    uint16_t jmp_insn_offset[2];  /* offset of direct jump insn */
    uintptr_t jmp_target_addr[2]; /* target address */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
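
/*
 * Illustrative sketches, not part of the QEMU API: per the masks above,
 * the cluster ID occupies the top 8 bits of cflags, and the instruction
 * count budget occupies the low 9 bits (matching TCG_MAX_INSNS == 512).
 * The helper names below are hypothetical.
 */
static inline unsigned tb_cflags_cluster(uint32_t cflags)
{
    return (cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
}

static inline unsigned tb_cflags_count(uint32_t cflags)
{
    return cflags & CF_COUNT_MASK;
}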

/* The alignment given to TranslationBlock during allocation. */
#define CODE_GEN_ALIGN 16

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}
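
/*
 * Usage sketch (hypothetical helper): tb_cflags() yields an atomic but
 * unsynchronized snapshot, e.g. to test whether a TB has been marked
 * stale. A stable answer requires holding @jmp_lock, per the notes above.
 */
static inline bool tb_is_invalid(const TranslationBlock *tb)
{
    return tb_cflags(tb) & CF_INVALID;
}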

bool tcg_cflags_has(CPUState *cpu, uint32_t flags);
void tcg_cflags_set(CPUState *cpu, uint32_t flags);

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}
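
/*
 * Usage sketch (hypothetical helper): a cross-page check reduces to a
 * comparison with -1, which the user-only path above returns when the
 * TB stays on one page; this assumes the system-mode code records -1
 * in page_addr[1] in the same situation.
 */
static inline bool tb_spans_two_pages(const TranslationBlock *tb)
{
    return tb_page_addr1(tb) != (tb_page_addr_t)-1;
}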

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}
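
/*
 * Sketch of the sequence described above, as a translation loop might
 * perform it (illustrative only; the real caller is the translator):
 *
 *     tb_set_page_addr0(tb, page0);    interval starts as a single byte
 *     ...
 *     tb_set_page_addr1(tb, page1);    extended on crossing a page boundary
 */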

/* TranslationBlock invalidate API */
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
                              tb_page_addr_t last);
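
/*
 * Usage sketch: invalidating every TB intersecting one guest page,
 * assuming @start is page-aligned and @last is an inclusive bound (an
 * assumption suggested by the parameter name, not verified here):
 *
 *     tb_invalidate_phys_range(cpu, start, start + TARGET_PAGE_SIZE - 1);
 */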

#endif /* EXEC_TRANSLATION_BLOCK_H */