xref: /qemu/accel/tcg/atomic_template.h (revision ec4a9629a14c45157d0e89daaff3c982df818cd6)
1 /*
2  * Atomic helper templates
3  * Included from tcg-runtime.c and cputlb.c.
4  *
5  * Copyright (c) 2016 Red Hat, Inc
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/plugin.h"
22 
/*
 * Per-size template parameters.  The including file defines DATA_SIZE
 * (1, 2, 4, 8 or 16 bytes) before including this header:
 *   SUFFIX     - helper-name size suffix (b/w/l/q/o)
 *   DATA_TYPE  - unsigned host type of the in-memory value
 *   SDATA_TYPE - matching signed type, used by smin/smax helpers
 *                (not defined for 16 bytes; those helpers are not
 *                generated at that size)
 *   BSWAP      - byte-swap primitive; empty at 1 byte (nothing to swap)
 *   SHIFT      - log2(DATA_SIZE)
 */
23 #if DATA_SIZE == 16
24 # define SUFFIX     o
25 # define DATA_TYPE  Int128
26 # define BSWAP      bswap128
27 # define SHIFT      4
28 #elif DATA_SIZE == 8
29 # define SUFFIX     q
30 # define DATA_TYPE  aligned_uint64_t
31 # define SDATA_TYPE aligned_int64_t
32 # define BSWAP      bswap64
33 # define SHIFT      3
34 #elif DATA_SIZE == 4
35 # define SUFFIX     l
36 # define DATA_TYPE  uint32_t
37 # define SDATA_TYPE int32_t
38 # define BSWAP      bswap32
39 # define SHIFT      2
40 #elif DATA_SIZE == 2
41 # define SUFFIX     w
42 # define DATA_TYPE  uint16_t
43 # define SDATA_TYPE int16_t
44 # define BSWAP      bswap16
45 # define SHIFT      1
46 #elif DATA_SIZE == 1
47 # define SUFFIX     b
48 # define DATA_TYPE  uint8_t
49 # define SDATA_TYPE int8_t
50 # define BSWAP
51 # define SHIFT      0
52 #else
53 # error unsupported data size
54 #endif
55 
/* Helper-ABI type: values narrower than 32 bits are widened to uint32_t
   at the call boundary; 4/8/16-byte values pass through as DATA_TYPE. */
56 #if DATA_SIZE >= 4
57 # define ABI_TYPE  DATA_TYPE
58 #else
59 # define ABI_TYPE  uint32_t
60 #endif
61 
62 /* Define host-endian atomic operations.  Note that END is used within
63    the ATOMIC_NAME macro, and redefined below.  */
/* NOTE(review): ATOMIC_NAME is supplied by the including file and is
   assumed to splice SUFFIX and END into the helper name — confirm there. */
64 #if DATA_SIZE == 1
65 # define END
66 #elif HOST_BIG_ENDIAN
67 # define END  _be
68 #else
69 # define END  _le
70 #endif
71 
/*
 * Host-endian atomic compare-and-swap helper.
 *
 * @env:     CPU state of the executing vCPU.
 * @addr:    guest address of the DATA_SIZE-byte object.
 * @cmpv:    expected value (ABI-widened).
 * @newv:    value stored iff the current contents equal @cmpv.
 * @oi:      MemOpIdx describing the access (memop + mmu index).
 * @retaddr: host return address, passed to the lookup for fault handling.
 *
 * Returns the value previously in memory, whether or not the swap
 * was performed.
 */
72 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
73                               ABI_TYPE cmpv, ABI_TYPE newv,
74                               MemOpIdx oi, uintptr_t retaddr)
75 {
    /* Resolve guest address to a host pointer with read+write rights.
       NOTE(review): presumably faults unwind via retaddr — confirm in
       atomic_mmu_lookup's definition in the including file. */
76     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
77                                          PAGE_READ | PAGE_WRITE, retaddr);
78     DATA_TYPE ret;
79 
80 #if DATA_SIZE == 16
    /* 128-bit CAS requires the dedicated Int128 primitive. */
81     ret = atomic16_cmpxchg(haddr, cmpv, newv);
82 #else
83     ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
84 #endif
    /* Cleanup must precede the trace hook; both run on success paths only. */
85     ATOMIC_MMU_CLEANUP;
86     atomic_trace_rmw_post(env, addr, oi);
87     return ret;
88 }
89 
90 #if DATA_SIZE < 16
/*
 * Host-endian atomic exchange: unconditionally store @val and return
 * the previous contents.  Only generated for DATA_SIZE < 16 (see the
 * enclosing #if in this file).
 */
91 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
92                            MemOpIdx oi, uintptr_t retaddr)
93 {
94     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
95                                          PAGE_READ | PAGE_WRITE, retaddr);
96     DATA_TYPE ret;
97 
98     ret = qatomic_xchg__nocheck(haddr, val);
99     ATOMIC_MMU_CLEANUP;
100     atomic_trace_rmw_post(env, addr, oi);
101     return ret;
102 }
103 
/*
 * GEN_ATOMIC_HELPER(X) - emit a host-endian read-modify-write helper
 * that maps directly onto the corresponding qatomic_<X> primitive.
 * No comments may be placed inside the macro body: every line ends in
 * a backslash continuation.
 */
104 #define GEN_ATOMIC_HELPER(X)                                        \
105 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
106                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
107 {                                                                   \
108     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
109                                          PAGE_READ | PAGE_WRITE, retaddr); \
110     DATA_TYPE ret;                                                  \
111     ret = qatomic_##X(haddr, val);                                  \
112     ATOMIC_MMU_CLEANUP;                                             \
113     atomic_trace_rmw_post(env, addr, oi);                           \
114     return ret;                                                     \
115 }
116 
/* By qatomic naming convention, fetch_<op> variants return the old
   value and <op>_fetch variants return the newly computed value. */
117 GEN_ATOMIC_HELPER(fetch_add)
118 GEN_ATOMIC_HELPER(fetch_and)
119 GEN_ATOMIC_HELPER(fetch_or)
120 GEN_ATOMIC_HELPER(fetch_xor)
121 GEN_ATOMIC_HELPER(add_fetch)
122 GEN_ATOMIC_HELPER(and_fetch)
123 GEN_ATOMIC_HELPER(or_fetch)
124 GEN_ATOMIC_HELPER(xor_fetch)
125 
126 #undef GEN_ATOMIC_HELPER
127 
128 /*
129  * These helpers are, as a whole, full barriers.  Within the helper,
130  * the leading barrier is explicit and the trailing barrier is within
131  * cmpxchg primitive.
132  *
133  * Trace this load + RMW loop as a single RMW op. This way, regardless
134  * of CF_PARALLEL's value, we'll trace just a read and a write.
135  */
/*
 * GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)
 *   X          - helper name suffix
 *   FN         - binary macro computing the new value from (old, val)
 *   XDATA_TYPE - computation type; SDATA_TYPE for the signed min/max
 *                variants so that FN compares with signed semantics
 *   RET        - which local to return: 'old' (fetch_*) or 'new' (*_fetch)
 *
 * Implemented as a cmpxchg retry loop: min/max have no single host
 * RMW primitive.  The loop exits when the cmpxchg observes the same
 * value that was used to compute 'new'.
 */
136 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
137 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
138                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
139 {                                                                   \
140     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
141                                           PAGE_READ | PAGE_WRITE, retaddr); \
142     XDATA_TYPE cmp, old, new, val = xval;                           \
143     smp_mb();                                                       \
144     cmp = qatomic_read__nocheck(haddr);                             \
145     do {                                                            \
146         old = cmp; new = FN(old, val);                              \
147         cmp = qatomic_cmpxchg__nocheck(haddr, old, new);            \
148     } while (cmp != old);                                           \
149     ATOMIC_MMU_CLEANUP;                                             \
150     atomic_trace_rmw_post(env, addr, oi);                           \
151     return RET;                                                     \
152 }
153 
/* Signed and unsigned min/max, returning the old value... */
154 GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
155 GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
156 GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
157 GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)
158 
/* ...and the same operations returning the new value. */
159 GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
160 GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
161 GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
162 GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
163 
164 #undef GEN_ATOMIC_HELPER_FN
165 #endif /* DATA SIZE < 16 */
166 
167 #undef END
168 
169 #if DATA_SIZE > 1
170 
171 /* Define reverse-host-endian atomic operations.  Note that END is used
172    within the ATOMIC_NAME macro.  */
/* END now selects the guest-endianness opposite to the host, so these
   helpers byte-swap values around the host-endian primitives. */
173 #if HOST_BIG_ENDIAN
174 # define END  _le
175 #else
176 # define END  _be
177 #endif
178 
/*
 * Reverse-host-endian compare-and-swap.  @cmpv/@newv arrive in guest
 * byte order; both are swapped to host order before the CAS, and the
 * old memory value is swapped back before being returned.
 */
179 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
180                               ABI_TYPE cmpv, ABI_TYPE newv,
181                               MemOpIdx oi, uintptr_t retaddr)
182 {
183     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
184                                          PAGE_READ | PAGE_WRITE, retaddr);
185     DATA_TYPE ret;
186 
187 #if DATA_SIZE == 16
188     ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
189 #else
190     ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
191 #endif
192     ATOMIC_MMU_CLEANUP;
193     atomic_trace_rmw_post(env, addr, oi);
194     return BSWAP(ret);
195 }
196 
197 #if DATA_SIZE < 16
/*
 * Reverse-host-endian atomic exchange: store byte-swapped @val,
 * return the previous contents swapped back to guest byte order.
 * NOTE(review): 'ret' is ABI_TYPE here while the host-endian variant
 * uses DATA_TYPE; BSWAP's argument is truncated to DATA_TYPE width at
 * the call — presumed intentional, confirm against upstream.
 */
198 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
199                            MemOpIdx oi, uintptr_t retaddr)
200 {
201     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
202                                          PAGE_READ | PAGE_WRITE, retaddr);
203     ABI_TYPE ret;
204 
205     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
206     ATOMIC_MMU_CLEANUP;
207     atomic_trace_rmw_post(env, addr, oi);
208     return BSWAP(ret);
209 }
210 
/*
 * GEN_ATOMIC_HELPER(X) - reverse-host-endian read-modify-write helper.
 * Input and result are byte-swapped around the host-endian qatomic_<X>
 * primitive.  Only bitwise ops are generated this way: and/or/xor
 * commute with a byte swap, whereas addition does not (see the ADD
 * cmpxchg-loop variants later in this file).
 */
211 #define GEN_ATOMIC_HELPER(X)                                        \
212 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
213                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
214 {                                                                   \
215     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
216                                          PAGE_READ | PAGE_WRITE, retaddr); \
217     DATA_TYPE ret;                                                  \
218     ret = qatomic_##X(haddr, BSWAP(val));                           \
219     ATOMIC_MMU_CLEANUP;                                             \
220     atomic_trace_rmw_post(env, addr, oi);                           \
221     return BSWAP(ret);                                              \
222 }
223 
224 GEN_ATOMIC_HELPER(fetch_and)
225 GEN_ATOMIC_HELPER(fetch_or)
226 GEN_ATOMIC_HELPER(fetch_xor)
227 GEN_ATOMIC_HELPER(and_fetch)
228 GEN_ATOMIC_HELPER(or_fetch)
229 GEN_ATOMIC_HELPER(xor_fetch)
230 
231 #undef GEN_ATOMIC_HELPER
232 
233 /* These helpers are, as a whole, full barriers.  Within the helper,
234  * the leading barrier is explicit and the trailing barrier is within
235  * cmpxchg primitive.
236  *
237  * Trace this load + RMW loop as a single RMW op. This way, regardless
238  * of CF_PARALLEL's value, we'll trace just a read and a write.
239  */
/*
 * Reverse-host-endian GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET):
 * like the host-endian version, but the loop keeps two views of the
 * memory word — ldo/ldn in raw (host-memory) byte order for the
 * cmpxchg itself, and old/new in guest byte order for FN.  RET ('old'
 * or 'new') is therefore already in guest order when returned.
 */
240 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
241 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
242                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
243 {                                                                   \
244     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
245                                           PAGE_READ | PAGE_WRITE, retaddr); \
246     XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
247     smp_mb();                                                       \
248     ldn = qatomic_read__nocheck(haddr);                             \
249     do {                                                            \
250         ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
251         ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
252     } while (ldo != ldn);                                           \
253     ATOMIC_MMU_CLEANUP;                                             \
254     atomic_trace_rmw_post(env, addr, oi);                           \
255     return RET;                                                     \
256 }
257 
/* Signed/unsigned min/max, returning the old value... */
258 GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
259 GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
260 GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
261 GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)
262 
/* ...and returning the new value. */
263 GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
264 GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
265 GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
266 GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
267 
268 /* Note that for addition, we need to use a separate cmpxchg loop instead
269    of bswaps for the reverse-host-endian helpers.  */
270 #define ADD(X, Y)   (X + Y)
271 GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
272 GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
273 #undef ADD
274 
275 #undef GEN_ATOMIC_HELPER_FN
276 #endif /* DATA_SIZE < 16 */
277 
278 #undef END
279 #endif /* DATA_SIZE > 1 */
280 
281 #undef BSWAP
282 #undef ABI_TYPE
283 #undef DATA_TYPE
284 #undef SDATA_TYPE
285 #undef SUFFIX
286 #undef DATA_SIZE
287 #undef SHIFT
288