/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"

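/*
 * Canonicalize a MemOp before use: trigger the alignment asserts as early
 * as possible, prefer MO_ALIGN when the alignment matches the access size,
 * drop MO_BSWAP for single-byte accesses, and drop MO_SIGN whenever the
 * value already fills the destination register or the access is a store.
 */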
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    /* Trigger the asserts within as early as possible.  */
    unsigned a_bits = get_alignment_bits(op);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

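/*
 * Emit a load or store opcode.  When the host is 32-bit and the guest
 * address is 64-bit, the address temp must be passed as its two 32-bit
 * halves, in host-endian order (see TCGV_LOW/TCGV_HIGH).
 */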
static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

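/*
 * Emit the barriers a guest memory access requires: start from the
 * ordering the guest demands, remove what the host already guarantees,
 * and emit a barrier only for the remainder.
 */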
static void tcg_gen_req_mo(TCGBar type)
{
#ifdef TCG_GUEST_DEFAULT_MO
    type &= TCG_GUEST_DEFAULT_MO;
#endif
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            plugin_gen_empty_mem_callback(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                plugin_gen_empty_mem_callback(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
#endif
}

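/*
 * Expand a 32-bit guest load.  If the host cannot byte-swap as part of
 * the memory operation, load in host order and swap the result; a signed
 * 16-bit swap takes zero-extended input and re-extends the sign itself.
 * Front ends reach this through the checked wrapper below, e.g.
 *     tcg_gen_qemu_ld_i32_chk(val, addr, mmu_idx, MO_TEUL, addr_type);
 * (argument names illustrative).
 */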
static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst(INDEX_op_qemu_ld_i32, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

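/*
 * Expand a 32-bit guest store.  When the host lacks memory bswap, swap
 * the value into a temporary first so the source operand is preserved.
 */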
static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        opc = INDEX_op_qemu_st8_i32;
    } else {
        opc = INDEX_op_qemu_st_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

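/*
 * Expand a 64-bit guest load.  On a 32-bit host, accesses smaller than
 * 64 bits reuse the i32 path and materialize the high half explicitly,
 * by sign or zero extension as the MemOp requires.
 */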
static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
#ifdef CONFIG_SOFTMMU
    /* Two softmmu tlb lookups are larger than one function call. */
    return false;
#else
    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        /* In a serialized context, no atomicity is required. */
        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
    default:
        g_assert_not_reached();
    }
#endif
}

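/*
 * Split a 128-bit MemOp into two 64-bit MemOps, distributing the
 * alignment constraint of the original across the pair.  For example,
 * MO_128 | MO_ALIGN (16-byte natural alignment) becomes
 * MO_64 | MO_ALIGN_16 for the first access and MO_64 | MO_ALIGN for
 * the second, which starts 8 bytes in.
 */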
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}

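/*
 * Helpers always take the guest address as a TCGv_i64; widen a 32-bit
 * address temp when needed, and free the copy afterward.
 */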
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

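/*
 * Expand a 128-bit guest load by one of three strategies, in order of
 * preference: a native 128-bit host memory operation, a pair of 64-bit
 * operations when that satisfies the requested atomicity, or a call to
 * the out-of-line ld_i128 helper.
 */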
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_ld_i128, tcgv_i64_temp(lo),
                 tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
                     make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_st_i128, tcgv_i64_temp(lo),
                 tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }
        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            y = b;
        }
        gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
                     make_memop_idx(mop[1], idx));

        if (b) {
            tcg_temp_free_i64(b);
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

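/*
 * Extend a value according to the sign and size of a MemOp; sizes at or
 * above the register width reduce to a simple move.
 */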
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

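/*
 * Register 64-bit and 128-bit cmpxchg helpers only when the host can
 * provide them; a missing table entry reads as NULL, which callers
 * below handle, e.g. by falling back to gen_helper_exit_atomic.
 */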
#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#ifdef CONFIG_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

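/*
 * Non-atomic cmpxchg expansion: load the old value, select between new
 * and old with movcond, and write the selection back unconditionally.
 * The unconditional store is acceptable here because this path is only
 * used where no other vCPU can observe the intermediate state.
 */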
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE
                                  ? gen_helper_nonatomic_cmpxchgo_le
                                  : gen_helper_nonatomic_cmpxchgo_be);
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(cpu_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

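/*
 * Expand a read-modify-write operation either inline (load, extend,
 * apply @gen, store back) or through a per-size atomic helper table;
 * @new_val selects whether the value returned is the new or the old one.
 */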
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);

        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

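/*
 * Generate the helper table plus the i32 and i64 entry points for one
 * read-modify-write operation, dispatching on CF_PARALLEL between the
 * atomic helpers and the inline non-atomic expansion.  For example,
 * GEN_ATOMIC_HELPER(fetch_add, add, 0) emits
 * tcg_gen_atomic_fetch_add_i32_chk and tcg_gen_atomic_fetch_add_i64_chk,
 * each returning the pre-operation value.
 */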
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

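/*
 * Dummy move helpers with the same signature as the two-operand ops
 * above: ignore the loaded value and return @b, so that
 * GEN_ATOMIC_HELPER(xchg, mov2, 0) reuses the read-modify-write
 * expansion to implement an exchange.
 */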
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER