/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"
#include "tcg-has.h"
#include "tcg-target-mo.h"

static void check_max_alignment(unsigned a_bits)
{
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/tlb-flags.h".
     */
    if (tcg_use_softmmu) {
        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
    }
}
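
/*
 * Worked example (illustrative): MO_ALIGN_16 encodes a_bits = 4, and the
 * softmmu TLB currently keeps 5 flag bits in the low bits of the page
 * offset.  With a typical page_bits of 12, the assertion 4 + 5 <= 12
 * holds; a hypothetical 256-byte alignment (a_bits = 8) would not.
 */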

static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = memop_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        op &= ~MO_ATOM_MASK;
        op |= MO_ATOM_NONE;
    }

    return op;
}
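
/*
 * Example (illustrative): a 32-bit signed load expressed as
 * MO_32 | MO_SIGN | MO_ALIGN_4 with is64 == false first becomes
 * MO_32 | MO_SIGN | MO_ALIGN, because the requested alignment equals
 * the access size, and then MO_32 | MO_ALIGN, because the sign bit
 * is meaningless once the value fills the 32-bit destination.
 */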

static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    TCGOp *op;

    if (vh) {
        op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
                         temp_arg(addr), oi);
    } else {
        op = tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
    }
    TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, TCG_TYPE_I64, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, TCG_TYPE_I64, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}
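
/*
 * Example (illustrative): for a guest with a strong memory model, such as
 * x86 (guest_mo includes everything except TCG_MO_ST_LD), translated on a
 * weakly ordered host (TCG_TARGET_DEFAULT_MO == 0), a load requesting
 * TCG_MO_LD_LD | TCG_MO_ST_LD is reduced to the bits the guest requires
 * but the host does not already provide, and a single barrier is emitted.
 */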

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}
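
/*
 * Example (illustrative): for a guest instruction like "load r0, [r0]",
 * a front end may pass the same temporary as both value and address, so
 * the original address must be copied before the load overwrites it if
 * a plugin wants to observe the vaddr afterwards.
 */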

#ifdef CONFIG_PLUGIN
static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            tcg_gen_plugin_mem_cb(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                tcg_gen_plugin_mem_cb(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                tcg_gen_plugin_mem_cb(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
}
#endif

static void
plugin_gen_mem_callbacks_i32(TCGv_i32 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
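        /*
         * tcg_env points at CPUArchState, which the build places
         * immediately after CPUState, so "offsetof(CPUState, ...) -
         * sizeof(CPUState)" is a negative offset from env back into
         * CPUState.  The HOST_BIG_ENDIAN adjustment stores the 32-bit
         * value into the low half of the 64-bit plugin_mem_value_low slot.
         */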
        tcg_gen_st_i32(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState) + (HOST_BIG_ENDIAN * 4));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i64(TCGv_i64 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i128(TCGv_i128 val,
                              TCGv_i64 copy_addr, TCGTemp *orig_addr,
                              MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(TCGV128_LOW(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        tcg_gen_st_i64(TCGV128_HIGH(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_high) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
             tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
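
/*
 * Worked example (illustrative): a 16-bit sign-extending load whose
 * endianness differs from the host (MO_SW with MO_BSWAP set) on a host
 * without memory bswap is rewritten above as a zero-extending load
 * followed by tcg_gen_bswap16_i32(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OS),
 * which swaps the two bytes and sign-extends the result to 32 bits.
 */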

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        opc = INDEX_op_qemu_st8_i32;
    } else {
        opc = INDEX_op_qemu_st_i32;
    }
    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
    /* Two softmmu TLB lookups are larger than one function call. */
    if (tcg_use_softmmu) {
        return false;
    }

    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        return false;
    default:
        g_assert_not_reached();
    }
}
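
/*
 * Example (illustrative): a user-only 16-byte load with
 * MO_ATOM_IFALIGN_PAIR only promises atomicity for each aligned 8-byte
 * half, so it may legally become two 64-bit loads; MO_ATOM_WITHIN16
 * can require all 16 bytes to be a single atomic access and therefore
 * must go through a helper.
 */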

static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}
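
/*
 * Worked example (illustrative): MO_128 | MO_ALIGN (a 16-byte access
 * aligned to its size) splits into mop_1 = MO_64 | MO_ALIGN_16, which
 * preserves the original 16-byte alignment check on the first half,
 * and mop_2 = MO_64 | MO_ALIGN, since the second half is then
 * naturally 8-byte aligned.
 */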

static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;

    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
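            /*
             * Bind the two halves in reverse order; together with the
             * 64-bit byte swaps below this implements a full 128-bit
             * byte swap.
             */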
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
                 tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
                     make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;

    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
                 tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
                         make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
                         make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_mov_i32(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    case MO_UQ:
    case MO_SQ:
        tcg_gen_mov_i64(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
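    /* Store newv on match, otherwise rewrite the old value unchanged. */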
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);
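        /* t0 is now zero iff oldv == cmpv. */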

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);
        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}
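
/*
 * Illustrative expansion: GEN_ATOMIC_HELPER(fetch_add, add, 0) below
 * defines tcg_gen_atomic_fetch_add_i32_chk() and
 * tcg_gen_atomic_fetch_add_i64_chk(), which dispatch to the
 * gen_helper_atomic_fetch_add* table under CF_PARALLEL and to a
 * load/tcg_gen_add/store sequence otherwise.  NEW == 0 returns the value
 * fetched before the operation; NEW == 1 returns the value after it.
 */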

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)
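
/*
 * xchg reuses the fetch-style expansion: the "operation" simply returns
 * its second operand, so the old memory value is fetched and the new
 * value is stored unchanged.
 */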

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER