xref: /qemu/include/accel/tcg/cpu-ldst.h (revision 0b6426ba6c218fa807fe97258d75cb4bc84c860d)
1 /*
2  *  Software MMU support (per-target)
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
16  *
17  */
18 
19 /*
20  * Generate inline load/store functions for all MMU modes (typically
21  * at least _user and _kernel) as well as _data versions, for all data
22  * sizes.
23  *
24  * Used by target op helpers.
25  *
26  * The syntax for the accessors is:
27  *
28  * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
29  *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
30  *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
31  *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
32  *
33  * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
34  *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
35  *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
36  *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
37  *
38  * sign is:
39  * (empty): for 32 and 64 bit sizes
40  *   u    : unsigned
41  *   s    : signed
42  *
43  * size is:
44  *   b: 8 bits
45  *   w: 16 bits
46  *   l: 32 bits
47  *   q: 64 bits
48  *
49  * end is:
50  * (empty): for target native endian, or for 8 bit access
51  *     _be: for forced big endian
52  *     _le: for forced little endian
53  *
54  * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
55  * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
56  * the index to use; the "data" and "code" suffixes take the index from
57  * cpu_mmu_index().
58  *
59  * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
60  * MemOp including alignment requirements.  The alignment will be enforced.
61  */
62 #ifndef CPU_LDST_H
63 #define CPU_LDST_H
64 
65 #ifndef CONFIG_TCG
66 #error Can only include this header with TCG
67 #endif
68 
69 #include "exec/cpu-ldst-common.h"
70 #include "exec/abi_ptr.h"
71 
72 #if defined(CONFIG_USER_ONLY)
73 #include "user/guest-host.h"
74 #endif /* CONFIG_USER_ONLY */
75 
76 uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
77 int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
78 uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
79 int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
80 uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
81 uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
82 uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
83 int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
84 uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
85 uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
86 
87 uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
88 int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
89 uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
90 int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
91 uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
92 uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
93 uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
94 int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
95 uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
96 uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
97 
98 void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
99 void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
100 void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
101 void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
102 void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
103 void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
104 void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
105 
106 void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
107                      uint32_t val, uintptr_t ra);
108 void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
109                         uint32_t val, uintptr_t ra);
110 void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
111                         uint32_t val, uintptr_t ra);
112 void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
113                         uint64_t val, uintptr_t ra);
114 void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
115                         uint32_t val, uintptr_t ra);
116 void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
117                         uint32_t val, uintptr_t ra);
118 void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
119                         uint64_t val, uintptr_t ra);
120 
121 uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
122                             int mmu_idx, uintptr_t ra);
123 int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
124                        int mmu_idx, uintptr_t ra);
125 uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
126                                int mmu_idx, uintptr_t ra);
127 int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
128                           int mmu_idx, uintptr_t ra);
129 uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
130                               int mmu_idx, uintptr_t ra);
131 uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
132                               int mmu_idx, uintptr_t ra);
133 uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
134                                int mmu_idx, uintptr_t ra);
135 int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
136                           int mmu_idx, uintptr_t ra);
137 uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
138                               int mmu_idx, uintptr_t ra);
139 uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
140                               int mmu_idx, uintptr_t ra);
141 
142 void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
143                        int mmu_idx, uintptr_t ra);
144 void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
145                           int mmu_idx, uintptr_t ra);
146 void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
147                           int mmu_idx, uintptr_t ra);
148 void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
149                           int mmu_idx, uintptr_t ra);
150 void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
151                           int mmu_idx, uintptr_t ra);
152 void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
153                           int mmu_idx, uintptr_t ra);
154 void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
155                           int mmu_idx, uintptr_t ra);
156 
/*
 * Resolve the endian-neutral accessor names (cpu_ldl_data() etc.) to the
 * explicit big- or little-endian variants that match the target's native
 * byte order.  8-bit accessors need no alias, as endianness is moot there.
 */
157 #if TARGET_BIG_ENDIAN
158 # define cpu_lduw_data        cpu_lduw_be_data
159 # define cpu_ldsw_data        cpu_ldsw_be_data
160 # define cpu_ldl_data         cpu_ldl_be_data
161 # define cpu_ldq_data         cpu_ldq_be_data
162 # define cpu_lduw_data_ra     cpu_lduw_be_data_ra
163 # define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
164 # define cpu_ldl_data_ra      cpu_ldl_be_data_ra
165 # define cpu_ldq_data_ra      cpu_ldq_be_data_ra
166 # define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
167 # define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
168 # define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
169 # define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
170 # define cpu_stw_data         cpu_stw_be_data
171 # define cpu_stl_data         cpu_stl_be_data
172 # define cpu_stq_data         cpu_stq_be_data
173 # define cpu_stw_data_ra      cpu_stw_be_data_ra
174 # define cpu_stl_data_ra      cpu_stl_be_data_ra
175 # define cpu_stq_data_ra      cpu_stq_be_data_ra
176 # define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
177 # define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
178 # define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
179 #else
180 # define cpu_lduw_data        cpu_lduw_le_data
181 # define cpu_ldsw_data        cpu_ldsw_le_data
182 # define cpu_ldl_data         cpu_ldl_le_data
183 # define cpu_ldq_data         cpu_ldq_le_data
184 # define cpu_lduw_data_ra     cpu_lduw_le_data_ra
185 # define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
186 # define cpu_ldl_data_ra      cpu_ldl_le_data_ra
187 # define cpu_ldq_data_ra      cpu_ldq_le_data_ra
188 # define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
189 # define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
190 # define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
191 # define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
192 # define cpu_stw_data         cpu_stw_le_data
193 # define cpu_stl_data         cpu_stl_le_data
194 # define cpu_stq_data         cpu_stq_le_data
195 # define cpu_stw_data_ra      cpu_stw_le_data_ra
196 # define cpu_stl_data_ra      cpu_stl_le_data_ra
197 # define cpu_stq_data_ra      cpu_stq_le_data_ra
198 # define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
199 # define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
200 # define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
201 #endif
202 
203 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
204 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
205 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
206 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
207 
208 /**
209  * tlb_vaddr_to_host:
210  * @env: CPUArchState
211  * @addr: guest virtual address to look up
212  * @access_type: MMU_DATA_LOAD (read), MMU_DATA_STORE (write),
212  *               or MMU_INST_FETCH (execute)
213  * @mmu_idx: MMU index to use for lookup
214  *
215  * Look up the specified guest virtual address in the TCG softmmu TLB.
216  * If we can translate a host virtual address suitable for direct RAM
217  * access, without causing a guest exception, then return it.
218  * Otherwise (TLB entry is for an I/O access, guest software
219  * TLB fill required, etc) return NULL.
220  */
221 #ifdef CONFIG_USER_ONLY
222 static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
223                                       MMUAccessType access_type, int mmu_idx)
224 {
    /*
     * User-only: guest memory lives directly in the host address space,
     * so the g2h() translation always succeeds; access_type and mmu_idx
     * are unused here and accepted only for interface parity with the
     * system-mode variant below.
     */
225     return g2h(env_cpu(env), addr);
226 }
227 #else
/* System mode: out-of-line implementation; may return NULL (see above). */
228 void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
229                         MMUAccessType access_type, int mmu_idx);
230 #endif
231 
232 /*
233  * For user-only, helpers that use guest to host address translation
234  * must protect the actual host memory access by recording 'retaddr'
235  * for the signal handler.  This is required for a race condition in
236  * which another thread unmaps the page between a probe and the
237  * actual access.
238  */
239 #ifdef CONFIG_USER_ONLY
240 extern __thread uintptr_t helper_retaddr;
241 
/*
 * set_helper_retaddr: publish @ra for the user-only SIGSEGV handler.
 *
 * Record the helper's return address so that, if a subsequent guest
 * memory access faults (e.g. another thread unmapped the page between
 * a probe and the access, per the note above), the signal handler can
 * attribute the fault correctly.  Pair with clear_helper_retaddr()
 * once the guarded access has completed.
 */
242 static inline void set_helper_retaddr(uintptr_t ra)
243 {
244     helper_retaddr = ra;
245     /*
246      * Ensure that this write is visible to the SIGSEGV handler that
247      * may be invoked due to a subsequent invalid memory operation.
248      */
249     signal_barrier();
250 }
251 
/*
 * clear_helper_retaddr: revoke the address published by
 * set_helper_retaddr() after the guarded memory operations are done.
 * The barrier comes BEFORE the reset, mirroring set_helper_retaddr().
 */
251 static inline void clear_helper_retaddr(void)
252 {
253     /*
254      * Ensure that previous memory operations have succeeded before
255      * removing the data visible to the signal handler.
256      */
257     signal_barrier();
258     helper_retaddr = 0;
259 }
261 #else
262 #define set_helper_retaddr(ra)   do { } while (0)
263 #define clear_helper_retaddr()   do { } while (0)
264 #endif
265 
266 #endif /* CPU_LDST_H */
267