/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"
#include "tcg-has.h"
#include "tcg-target-mo.h"

static void check_max_alignment(unsigned a_bits)
{
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
     */
    if (tcg_use_softmmu) {
        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
    }
}

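/*
 * Canonicalize @op for an i32 (is64 == 0) or i64 (is64 == 1) access:
 * fold a natural alignment into MO_ALIGN (e.g. MO_32 | MO_ALIGN_4
 * becomes MO_32 | MO_ALIGN), drop MO_BSWAP for byte accesses, drop
 * MO_SIGN for stores and for loads of the full register width, and
 * reduce the atomicity requirement when the TB executes outside of
 * parallel context.
 */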
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = memop_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        op &= ~MO_ATOM_MASK;
        op |= MO_ATOM_NONE;
    }

    return op;
}

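/*
 * Emit one guest memory opcode: @vl holds the value, with @vh as the
 * second half when the value is split across two host registers;
 * @addr is the guest address and @oi packs the MemOp with the mmu index.
 */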
static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (vh) {
        tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
    } else {
        tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
    }
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, TCG_TYPE_I64, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, TCG_TYPE_I64, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

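/*
 * Emit a barrier for the subset of the guest memory model (@type)
 * that the guest requires and the host does not already provide
 * via TCG_TARGET_DEFAULT_MO.
 */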
static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

#ifdef CONFIG_PLUGIN
static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            tcg_gen_plugin_mem_cb(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                tcg_gen_plugin_mem_cb(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                tcg_gen_plugin_mem_cb(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
}
#endif

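/*
 * Width-specific wrappers: store the value just transferred into
 * CPUState.neg.plugin_mem_value_{low,high} for the plugin to read,
 * then emit the memory callback.  The negative offsets rely on
 * tcg_env pointing at CPUArchState, which directly follows CPUState
 * in ArchCPU; HOST_BIG_ENDIAN * 4 selects the low 32 bits of the
 * 64-bit slot for the i32 case.
 */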
static void
plugin_gen_mem_callbacks_i32(TCGv_i32 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i32(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState) + (HOST_BIG_ENDIAN * 4));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i64(TCGv_i64 val,
                             TCGv_i64 copy_addr, TCGTemp *orig_addr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(val, tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

static void
plugin_gen_mem_callbacks_i128(TCGv_i128 val,
                              TCGv_i64 copy_addr, TCGTemp *orig_addr,
                              MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        tcg_gen_st_i64(TCGV128_LOW(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_low) -
                       sizeof(CPUState));
        tcg_gen_st_i64(TCGV128_HIGH(val), tcg_env,
                       offsetof(CPUState, neg.plugin_mem_value_high) -
                       sizeof(CPUState));
        plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
    }
#endif
}

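/*
 * Expand a guest load into an i32: emit any barrier the guest memory
 * model requires, canonicalize the MemOp, and if the host cannot
 * byte-swap within the memory operation, load in host order and swap
 * the result afterward.
 */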
static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
             tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

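/*
 * Expand a guest store from an i32.  When the host cannot byte-swap
 * within the memory operation, swap into a temporary first.  A
 * dedicated 8-bit store opcode is used where the backend provides
 * one (TCG_TARGET_HAS_qemu_st8_i32).
 */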
static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        opc = INDEX_op_qemu_st8_i32;
    } else {
        opc = INDEX_op_qemu_st_i32;
    }
    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

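/*
 * Expand a guest load into an i64.  On 32-bit hosts a sub-64-bit
 * load is done as an i32 load into the low half, with the high half
 * set by sign- or zero-extension.
 */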
static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

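/*
 * Expand a guest store from an i64.  On 32-bit hosts a sub-64-bit
 * store reduces to an i32 store of the low half.
 */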
static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
    /* Two softmmu tlb lookups are larger than one function call. */
    if (tcg_use_softmmu) {
        return false;
    }

    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        return false;
    default:
        g_assert_not_reached();
    }
}

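/*
 * Split a 16-byte MemOp into two 8-byte MemOps, preserving the
 * alignment constraints of the original: ret[0] covers the first
 * 8 bytes, ret[1] the second.  For example, MO_128 | MO_ALIGN
 * yields { MO_64 | MO_ALIGN_16, MO_64 | MO_ALIGN }.
 */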
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * If the host cannot byte-swap within the memory operation, drop
     * MO_BSWAP from both halves; the callers swap each half separately.
     */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}

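/*
 * Helpers taking a guest address expect a TCGv_i64: widen a 32-bit
 * address into a fresh temporary, which maybe_free_addr64() releases
 * again; a 64-bit address passes through unchanged.
 */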
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

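/*
 * Expand a guest 16-byte load, in decreasing order of preference:
 * a native i128 load opcode (64-bit hosts only, for now), a pair of
 * i64 loads when the requested atomicity permits, or the out-of-line
 * helper gen_helper_ld_i128.
 */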
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;

    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
                 tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
                     make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

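/*
 * Expand a guest 16-byte store, mirroring the load expansion above:
 * native i128 opcode, a pair of i64 stores, or gen_helper_st_i128.
 */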
static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;

    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */

    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
                 tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
                     make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
                         make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
                         make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

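/* Sign- or zero-extend @val according to the size and sign of @opc. */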
void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_mov_i32(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    case MO_UQ:
    case MO_SQ:
        tcg_gen_mov_i64(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

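/*
 * Table of cmpxchg helpers, indexed by MO_SIZE | MO_BSWAP.  The
 * 64-bit and 128-bit entries exist only when the host provides the
 * corresponding atomic operation (CONFIG_ATOMIC64, HAVE_CMPXCHG128).
 */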
static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

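/*
 * cmpxchg expansions.  The non-atomic forms load the old value,
 * select between old and new with movcond, and store the result
 * back unconditionally; the atomic entry points fall back to them
 * when the TB executes outside of parallel context (!CF_PARALLEL).
 */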
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

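/*
 * Atomic cmpxchg on i64: a true 64-bit operation uses a helper from
 * table_cmpxchg when CONFIG_ATOMIC64 provides one, else exits to a
 * serial context via gen_helper_exit_atomic and retries; narrower
 * operations reduce to the i32 expansion.
 */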
static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

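/*
 * Atomic cmpxchg on i128: requires a 16-byte cmpxchg helper
 * (HAVE_CMPXCHG128); otherwise exit to a serial context and retry,
 * as above.
 */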
static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

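/*
 * Expansions for the read-modify-write operations generated by
 * GEN_ATOMIC_HELPER below.  The non-atomic forms load, extend the
 * operand, apply @gen, and store; they return the old (fetch_*) or
 * new (*_fetch) value according to @new_val.  The atomic forms call
 * the helper selected from @table.
 */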
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);
        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

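/*
 * Instantiate the helper table and the i32/i64 entry points for one
 * read-modify-write operation.  Each entry point dispatches on
 * CF_PARALLEL: atomic helpers when other cpus may race, inline
 * non-atomic expansion otherwise.  E.g. GEN_ATOMIC_HELPER(fetch_add,
 * add, 0) defines tcg_gen_atomic_fetch_add_{i32,i64}_chk returning
 * the pre-add value (NEW == 0).
 */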
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

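/*
 * The non-atomic form of xchg stores the new value unconditionally,
 * so it reuses the machinery above with a "mov2" generator that
 * ignores the loaded value @a and simply copies @b.
 */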
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER
1267