xref: /qemu/include/accel/tcg/cpu-ldst.h (revision 0a29f11676d5b834f980a818d35b23d20d7ea226)
/*
 *  Software MMU support (per-target)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Inline load/store functions for all MMU modes (typically at least
 * _user and _kernel), as well as _data versions, for all data sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32- and 64-bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target-native endianness, or for 8-bit accesses
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data", "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp, including alignment requirements.  The alignment will be
 * enforced.
 */
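/*
 * Example (an illustrative sketch, not part of this header): a
 * hypothetical target helper that loads a big-endian word and stores
 * it back little-endian, using GETPC() as the unwind return address:
 *
 *     uint32_t HELPER(bswap_word)(CPUArchState *env, target_ulong addr)
 *     {
 *         uint32_t val = cpu_ldl_be_data_ra(env, addr, GETPC());
 *         cpu_stl_le_data_ra(env, addr, val, GETPC());
 *         return val;
 *     }
 */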
#ifndef CPU_LDST_H
#define CPU_LDST_H

#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif

#include "exec/cpu-ldst-common.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/abi_ptr.h"

#if defined(CONFIG_USER_ONLY)
#include "user/guest-host.h"
#endif /* CONFIG_USER_ONLY */
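
/*
 * Load/store with an explicitly supplied mmu index.  Unaligned
 * accesses are allowed (MO_UNALN); 'ra' is the host return address,
 * used to unwind guest state if the access faults.
 */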

static inline uint32_t
cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    return cpu_ldb_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
    return cpu_ldw_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
    return cpu_ldl_mmu(env, addr, oi, ra);
}

static inline uint64_t
cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
    return cpu_ldq_mmu(env, addr, oi, ra);
}

static inline uint32_t
cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
    return cpu_ldw_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    return cpu_ldl_mmu(env, addr, oi, ra);
}

static inline uint64_t
cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
    return cpu_ldq_mmu(env, addr, oi, ra);
}

static inline void
cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                  int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    cpu_stb_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
    cpu_stw_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
    cpu_stl_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
    cpu_stq_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
    cpu_stw_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    cpu_stl_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
    cpu_stq_mmu(env, addr, val, oi, ra);
}

/*--------------------------*/
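
/*
 * Load/store as above, but with the mmu index taken from the current
 * CPU state via cpu_mmu_index(..., false), i.e. the index used for a
 * normal data access.
 */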

static inline uint32_t
cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline int
cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    return (int8_t)cpu_ldub_data_ra(env, addr, ra);
}

static inline uint32_t
cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline int
cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
}

static inline uint32_t
cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline uint64_t
cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline uint32_t
cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline int
cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
}

static inline uint32_t
cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline uint64_t
cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
}

static inline void
cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}

static inline void
cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}

/*--------------------------*/
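
/*
 * As above, but with a zero return address: for use from places that
 * are not within a translation block, so that no unwinding of guest
 * state is required on a fault.
 */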

static inline uint32_t
cpu_ldub_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_ldub_data_ra(env, addr, 0);
}

static inline int
cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_data(env, addr);
}

static inline uint32_t
cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_lduw_be_data_ra(env, addr, 0);
}

static inline int
cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_be_data(env, addr);
}

static inline uint32_t
cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_ldl_be_data_ra(env, addr, 0);
}

static inline uint64_t
cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_ldq_be_data_ra(env, addr, 0);
}

static inline uint32_t
cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_lduw_le_data_ra(env, addr, 0);
}

static inline int
cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_le_data(env, addr);
}

static inline uint32_t
cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_ldl_le_data_ra(env, addr, 0);
}

static inline uint64_t
cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
{
    return cpu_ldq_le_data_ra(env, addr, 0);
}

static inline void
cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
    cpu_stb_data_ra(env, addr, val, 0);
}

static inline void
cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
    cpu_stw_be_data_ra(env, addr, val, 0);
}

static inline void
cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
    cpu_stl_be_data_ra(env, addr, val, 0);
}

static inline void
cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
    cpu_stq_be_data_ra(env, addr, val, 0);
}

static inline void
cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
    cpu_stw_le_data_ra(env, addr, val, 0);
}

static inline void
cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
    cpu_stl_le_data_ra(env, addr, val, 0);
}

static inline void
cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
    cpu_stq_le_data_ra(env, addr, val, 0);
}

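/*
 * Map the unsuffixed accessor names onto the variant matching the
 * target's native endianness.
 */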
#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

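/*
 * Load from guest memory as an instruction fetch, using the mmu index
 * appropriate for code access rather than data access.
 */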
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: access type: MMU_DATA_LOAD (0), MMU_DATA_STORE (1)
 *               or MMU_INST_FETCH (2)
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If the address can be translated to a host virtual address suitable
 * for direct RAM access, without causing a guest exception, return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
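
/*
 * Illustrative use (a sketch, assuming a byte buffer 'buf' of length
 * 'len'): try the fast path through host memory, and fall back to the
 * full access functions when the lookup fails.
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         memcpy(buf, host, len);
 *     } else {
 *         for (size_t i = 0; i < len; i++) {
 *             buf[i] = cpu_ldub_mmuidx_ra(env, addr + i, mmu_idx, GETPC());
 *         }
 *     }
 */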

/*
 * For user-only, helpers that use guest to host address translation
 * must protect the actual host memory access by recording 'retaddr'
 * for the signal handler.  This is required to handle a race in which
 * another thread unmaps the page between a probe and the actual
 * access.
 */
#ifdef CONFIG_USER_ONLY
extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
#else
#define set_helper_retaddr(ra)   do { } while (0)
#define clear_helper_retaddr()   do { } while (0)
#endif

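/*
 * Typical pattern (an illustrative sketch, user-only): bracket the
 * direct host access so that a SIGSEGV raised by the access is
 * attributed to the helper's call site.
 *
 *     set_helper_retaddr(GETPC());
 *     memcpy(hostbuf, g2h(env_cpu(env), addr), len);
 *     clear_helper_retaddr();
 */
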
#endif /* CPU_LDST_H */