1 // SPDX-License-Identifier: GPL-2.0
2
3 // Generated by scripts/atomic/gen-atomic-fallback.sh
4 // DO NOT MODIFY THIS FILE DIRECTLY
5
6 #ifndef _LINUX_ATOMIC_FALLBACK_H
7 #define _LINUX_ATOMIC_FALLBACK_H
8
9 #include <linux/compiler.h>
10
11 #if defined(arch_xchg)
12 #define raw_xchg arch_xchg
13 #elif defined(arch_xchg_relaxed)
14 #define raw_xchg(...) \
15 __atomic_op_fence(arch_xchg, __VA_ARGS__)
16 #else
17 extern void raw_xchg_not_implemented(void);
18 #define raw_xchg(...) raw_xchg_not_implemented()
19 #endif
20
21 #if defined(arch_xchg_acquire)
22 #define raw_xchg_acquire arch_xchg_acquire
23 #elif defined(arch_xchg_relaxed)
24 #define raw_xchg_acquire(...) \
25 __atomic_op_acquire(arch_xchg, __VA_ARGS__)
26 #elif defined(arch_xchg)
27 #define raw_xchg_acquire arch_xchg
28 #else
29 extern void raw_xchg_acquire_not_implemented(void);
30 #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
31 #endif
32
33 #if defined(arch_xchg_release)
34 #define raw_xchg_release arch_xchg_release
35 #elif defined(arch_xchg_relaxed)
36 #define raw_xchg_release(...) \
37 __atomic_op_release(arch_xchg, __VA_ARGS__)
38 #elif defined(arch_xchg)
39 #define raw_xchg_release arch_xchg
40 #else
41 extern void raw_xchg_release_not_implemented(void);
42 #define raw_xchg_release(...) raw_xchg_release_not_implemented()
43 #endif
44
45 #if defined(arch_xchg_relaxed)
46 #define raw_xchg_relaxed arch_xchg_relaxed
47 #elif defined(arch_xchg)
48 #define raw_xchg_relaxed arch_xchg
49 #else
50 extern void raw_xchg_relaxed_not_implemented(void);
51 #define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
52 #endif
53
54 #if defined(arch_cmpxchg)
55 #define raw_cmpxchg arch_cmpxchg
56 #elif defined(arch_cmpxchg_relaxed)
57 #define raw_cmpxchg(...) \
58 __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
59 #else
60 extern void raw_cmpxchg_not_implemented(void);
61 #define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
62 #endif
63
64 #if defined(arch_cmpxchg_acquire)
65 #define raw_cmpxchg_acquire arch_cmpxchg_acquire
66 #elif defined(arch_cmpxchg_relaxed)
67 #define raw_cmpxchg_acquire(...) \
68 __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
69 #elif defined(arch_cmpxchg)
70 #define raw_cmpxchg_acquire arch_cmpxchg
71 #else
72 extern void raw_cmpxchg_acquire_not_implemented(void);
73 #define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
74 #endif
75
76 #if defined(arch_cmpxchg_release)
77 #define raw_cmpxchg_release arch_cmpxchg_release
78 #elif defined(arch_cmpxchg_relaxed)
79 #define raw_cmpxchg_release(...) \
80 __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
81 #elif defined(arch_cmpxchg)
82 #define raw_cmpxchg_release arch_cmpxchg
83 #else
84 extern void raw_cmpxchg_release_not_implemented(void);
85 #define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
86 #endif
87
88 #if defined(arch_cmpxchg_relaxed)
89 #define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
90 #elif defined(arch_cmpxchg)
91 #define raw_cmpxchg_relaxed arch_cmpxchg
92 #else
93 extern void raw_cmpxchg_relaxed_not_implemented(void);
94 #define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
95 #endif
96
97 #if defined(arch_cmpxchg64)
98 #define raw_cmpxchg64 arch_cmpxchg64
99 #elif defined(arch_cmpxchg64_relaxed)
100 #define raw_cmpxchg64(...) \
101 __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
102 #else
103 extern void raw_cmpxchg64_not_implemented(void);
104 #define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
105 #endif
106
107 #if defined(arch_cmpxchg64_acquire)
108 #define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
109 #elif defined(arch_cmpxchg64_relaxed)
110 #define raw_cmpxchg64_acquire(...) \
111 __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
112 #elif defined(arch_cmpxchg64)
113 #define raw_cmpxchg64_acquire arch_cmpxchg64
114 #else
115 extern void raw_cmpxchg64_acquire_not_implemented(void);
116 #define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
117 #endif
118
119 #if defined(arch_cmpxchg64_release)
120 #define raw_cmpxchg64_release arch_cmpxchg64_release
121 #elif defined(arch_cmpxchg64_relaxed)
122 #define raw_cmpxchg64_release(...) \
123 __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
124 #elif defined(arch_cmpxchg64)
125 #define raw_cmpxchg64_release arch_cmpxchg64
126 #else
127 extern void raw_cmpxchg64_release_not_implemented(void);
128 #define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
129 #endif
130
131 #if defined(arch_cmpxchg64_relaxed)
132 #define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
133 #elif defined(arch_cmpxchg64)
134 #define raw_cmpxchg64_relaxed arch_cmpxchg64
135 #else
136 extern void raw_cmpxchg64_relaxed_not_implemented(void);
137 #define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
138 #endif
139
140 #if defined(arch_cmpxchg128)
141 #define raw_cmpxchg128 arch_cmpxchg128
142 #elif defined(arch_cmpxchg128_relaxed)
143 #define raw_cmpxchg128(...) \
144 __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
145 #else
146 extern void raw_cmpxchg128_not_implemented(void);
147 #define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
148 #endif
149
150 #if defined(arch_cmpxchg128_acquire)
151 #define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
152 #elif defined(arch_cmpxchg128_relaxed)
153 #define raw_cmpxchg128_acquire(...) \
154 __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
155 #elif defined(arch_cmpxchg128)
156 #define raw_cmpxchg128_acquire arch_cmpxchg128
157 #else
158 extern void raw_cmpxchg128_acquire_not_implemented(void);
159 #define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
160 #endif
161
162 #if defined(arch_cmpxchg128_release)
163 #define raw_cmpxchg128_release arch_cmpxchg128_release
164 #elif defined(arch_cmpxchg128_relaxed)
165 #define raw_cmpxchg128_release(...) \
166 __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
167 #elif defined(arch_cmpxchg128)
168 #define raw_cmpxchg128_release arch_cmpxchg128
169 #else
170 extern void raw_cmpxchg128_release_not_implemented(void);
171 #define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
172 #endif
173
174 #if defined(arch_cmpxchg128_relaxed)
175 #define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
176 #elif defined(arch_cmpxchg128)
177 #define raw_cmpxchg128_relaxed arch_cmpxchg128
178 #else
179 extern void raw_cmpxchg128_relaxed_not_implemented(void);
180 #define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
181 #endif
182
183 #if defined(arch_try_cmpxchg)
184 #define raw_try_cmpxchg arch_try_cmpxchg
185 #elif defined(arch_try_cmpxchg_relaxed)
186 #define raw_try_cmpxchg(...) \
187 __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
188 #else
189 #define raw_try_cmpxchg(_ptr, _oldp, _new) \
190 ({ \
191 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
192 ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
193 if (unlikely(___r != ___o)) \
194 *___op = ___r; \
195 likely(___r == ___o); \
196 })
197 #endif
198
199 #if defined(arch_try_cmpxchg_acquire)
200 #define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
201 #elif defined(arch_try_cmpxchg_relaxed)
202 #define raw_try_cmpxchg_acquire(...) \
203 __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
204 #elif defined(arch_try_cmpxchg)
205 #define raw_try_cmpxchg_acquire arch_try_cmpxchg
206 #else
207 #define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
208 ({ \
209 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
210 ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
211 if (unlikely(___r != ___o)) \
212 *___op = ___r; \
213 likely(___r == ___o); \
214 })
215 #endif
216
217 #if defined(arch_try_cmpxchg_release)
218 #define raw_try_cmpxchg_release arch_try_cmpxchg_release
219 #elif defined(arch_try_cmpxchg_relaxed)
220 #define raw_try_cmpxchg_release(...) \
221 __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
222 #elif defined(arch_try_cmpxchg)
223 #define raw_try_cmpxchg_release arch_try_cmpxchg
224 #else
225 #define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
226 ({ \
227 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
228 ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
229 if (unlikely(___r != ___o)) \
230 *___op = ___r; \
231 likely(___r == ___o); \
232 })
233 #endif
234
235 #if defined(arch_try_cmpxchg_relaxed)
236 #define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
237 #elif defined(arch_try_cmpxchg)
238 #define raw_try_cmpxchg_relaxed arch_try_cmpxchg
239 #else
240 #define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
241 ({ \
242 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
243 ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
244 if (unlikely(___r != ___o)) \
245 *___op = ___r; \
246 likely(___r == ___o); \
247 })
248 #endif
249
250 #if defined(arch_try_cmpxchg64)
251 #define raw_try_cmpxchg64 arch_try_cmpxchg64
252 #elif defined(arch_try_cmpxchg64_relaxed)
253 #define raw_try_cmpxchg64(...) \
254 __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
255 #else
256 #define raw_try_cmpxchg64(_ptr, _oldp, _new) \
257 ({ \
258 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
259 ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
260 if (unlikely(___r != ___o)) \
261 *___op = ___r; \
262 likely(___r == ___o); \
263 })
264 #endif
265
266 #if defined(arch_try_cmpxchg64_acquire)
267 #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
268 #elif defined(arch_try_cmpxchg64_relaxed)
269 #define raw_try_cmpxchg64_acquire(...) \
270 __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
271 #elif defined(arch_try_cmpxchg64)
272 #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
273 #else
274 #define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
275 ({ \
276 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
277 ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
278 if (unlikely(___r != ___o)) \
279 *___op = ___r; \
280 likely(___r == ___o); \
281 })
282 #endif
283
284 #if defined(arch_try_cmpxchg64_release)
285 #define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
286 #elif defined(arch_try_cmpxchg64_relaxed)
287 #define raw_try_cmpxchg64_release(...) \
288 __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
289 #elif defined(arch_try_cmpxchg64)
290 #define raw_try_cmpxchg64_release arch_try_cmpxchg64
291 #else
292 #define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
293 ({ \
294 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
295 ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
296 if (unlikely(___r != ___o)) \
297 *___op = ___r; \
298 likely(___r == ___o); \
299 })
300 #endif
301
302 #if defined(arch_try_cmpxchg64_relaxed)
303 #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
304 #elif defined(arch_try_cmpxchg64)
305 #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
306 #else
307 #define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
308 ({ \
309 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
310 ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
311 if (unlikely(___r != ___o)) \
312 *___op = ___r; \
313 likely(___r == ___o); \
314 })
315 #endif
316
317 #if defined(arch_try_cmpxchg128)
318 #define raw_try_cmpxchg128 arch_try_cmpxchg128
319 #elif defined(arch_try_cmpxchg128_relaxed)
320 #define raw_try_cmpxchg128(...) \
321 __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
322 #else
323 #define raw_try_cmpxchg128(_ptr, _oldp, _new) \
324 ({ \
325 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
326 ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
327 if (unlikely(___r != ___o)) \
328 *___op = ___r; \
329 likely(___r == ___o); \
330 })
331 #endif
332
333 #if defined(arch_try_cmpxchg128_acquire)
334 #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
335 #elif defined(arch_try_cmpxchg128_relaxed)
336 #define raw_try_cmpxchg128_acquire(...) \
337 __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
338 #elif defined(arch_try_cmpxchg128)
339 #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
340 #else
341 #define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
342 ({ \
343 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
344 ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
345 if (unlikely(___r != ___o)) \
346 *___op = ___r; \
347 likely(___r == ___o); \
348 })
349 #endif
350
351 #if defined(arch_try_cmpxchg128_release)
352 #define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
353 #elif defined(arch_try_cmpxchg128_relaxed)
354 #define raw_try_cmpxchg128_release(...) \
355 __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
356 #elif defined(arch_try_cmpxchg128)
357 #define raw_try_cmpxchg128_release arch_try_cmpxchg128
358 #else
359 #define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
360 ({ \
361 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
362 ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
363 if (unlikely(___r != ___o)) \
364 *___op = ___r; \
365 likely(___r == ___o); \
366 })
367 #endif
368
369 #if defined(arch_try_cmpxchg128_relaxed)
370 #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
371 #elif defined(arch_try_cmpxchg128)
372 #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
373 #else
374 #define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
375 ({ \
376 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
377 ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
378 if (unlikely(___r != ___o)) \
379 *___op = ___r; \
380 likely(___r == ___o); \
381 })
382 #endif
383
384 #define raw_cmpxchg_local arch_cmpxchg_local
385
386 #ifdef arch_try_cmpxchg_local
387 #define raw_try_cmpxchg_local arch_try_cmpxchg_local
388 #else
389 #define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
390 ({ \
391 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
392 ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
393 if (unlikely(___r != ___o)) \
394 *___op = ___r; \
395 likely(___r == ___o); \
396 })
397 #endif
398
399 #define raw_cmpxchg64_local arch_cmpxchg64_local
400
401 #ifdef arch_try_cmpxchg64_local
402 #define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
403 #else
404 #define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
405 ({ \
406 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
407 ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
408 if (unlikely(___r != ___o)) \
409 *___op = ___r; \
410 likely(___r == ___o); \
411 })
412 #endif
413
414 #define raw_cmpxchg128_local arch_cmpxchg128_local
415
416 #ifdef arch_try_cmpxchg128_local
417 #define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
418 #else
419 #define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
420 ({ \
421 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
422 ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
423 if (unlikely(___r != ___o)) \
424 *___op = ___r; \
425 likely(___r == ___o); \
426 })
427 #endif
428
429 #define raw_sync_cmpxchg arch_sync_cmpxchg
430
431 #ifdef arch_sync_try_cmpxchg
432 #define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
433 #else
434 #define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
435 ({ \
436 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
437 ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
438 if (unlikely(___r != ___o)) \
439 *___op = ___r; \
440 likely(___r == ___o); \
441 })
442 #endif
443
444 /**
445 * raw_atomic_read() - atomic load with relaxed ordering
446 * @v: pointer to atomic_t
447 *
448 * Atomically loads the value of @v with relaxed ordering.
449 *
450 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
451 *
452 * Return: The value loaded from @v.
453 */
454 static __always_inline int
raw_atomic_read(const atomic_t * v)455 raw_atomic_read(const atomic_t *v)
456 {
457 return arch_atomic_read(v);
458 }
459
460 /**
461 * raw_atomic_read_acquire() - atomic load with acquire ordering
462 * @v: pointer to atomic_t
463 *
464 * Atomically loads the value of @v with acquire ordering.
465 *
466 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
467 *
468 * Return: The value loaded from @v.
469 */
470 static __always_inline int
raw_atomic_read_acquire(const atomic_t * v)471 raw_atomic_read_acquire(const atomic_t *v)
472 {
473 #if defined(arch_atomic_read_acquire)
474 return arch_atomic_read_acquire(v);
475 #else
476 int ret;
477
478 if (__native_word(atomic_t)) {
479 ret = smp_load_acquire(&(v)->counter);
480 } else {
481 ret = raw_atomic_read(v);
482 __atomic_acquire_fence();
483 }
484
485 return ret;
486 #endif
487 }
488
489 /**
490 * raw_atomic_set() - atomic set with relaxed ordering
491 * @v: pointer to atomic_t
492 * @i: int value to assign
493 *
494 * Atomically sets @v to @i with relaxed ordering.
495 *
496 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
497 *
498 * Return: Nothing.
499 */
500 static __always_inline void
raw_atomic_set(atomic_t * v,int i)501 raw_atomic_set(atomic_t *v, int i)
502 {
503 arch_atomic_set(v, i);
504 }
505
506 /**
507 * raw_atomic_set_release() - atomic set with release ordering
508 * @v: pointer to atomic_t
509 * @i: int value to assign
510 *
511 * Atomically sets @v to @i with release ordering.
512 *
513 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
514 *
515 * Return: Nothing.
516 */
517 static __always_inline void
raw_atomic_set_release(atomic_t * v,int i)518 raw_atomic_set_release(atomic_t *v, int i)
519 {
520 #if defined(arch_atomic_set_release)
521 arch_atomic_set_release(v, i);
522 #else
523 if (__native_word(atomic_t)) {
524 smp_store_release(&(v)->counter, i);
525 } else {
526 __atomic_release_fence();
527 raw_atomic_set(v, i);
528 }
529 #endif
530 }
531
532 /**
533 * raw_atomic_add() - atomic add with relaxed ordering
534 * @i: int value to add
535 * @v: pointer to atomic_t
536 *
537 * Atomically updates @v to (@v + @i) with relaxed ordering.
538 *
539 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
540 *
541 * Return: Nothing.
542 */
543 static __always_inline void
raw_atomic_add(int i,atomic_t * v)544 raw_atomic_add(int i, atomic_t *v)
545 {
546 arch_atomic_add(i, v);
547 }
548
549 /**
550 * raw_atomic_add_return() - atomic add with full ordering
551 * @i: int value to add
552 * @v: pointer to atomic_t
553 *
554 * Atomically updates @v to (@v + @i) with full ordering.
555 *
556 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
557 *
558 * Return: The updated value of @v.
559 */
560 static __always_inline int
raw_atomic_add_return(int i,atomic_t * v)561 raw_atomic_add_return(int i, atomic_t *v)
562 {
563 #if defined(arch_atomic_add_return)
564 return arch_atomic_add_return(i, v);
565 #elif defined(arch_atomic_add_return_relaxed)
566 int ret;
567 __atomic_pre_full_fence();
568 ret = arch_atomic_add_return_relaxed(i, v);
569 __atomic_post_full_fence();
570 return ret;
571 #else
572 #error "Unable to define raw_atomic_add_return"
573 #endif
574 }
575
576 /**
577 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
578 * @i: int value to add
579 * @v: pointer to atomic_t
580 *
581 * Atomically updates @v to (@v + @i) with acquire ordering.
582 *
583 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
584 *
585 * Return: The updated value of @v.
586 */
587 static __always_inline int
raw_atomic_add_return_acquire(int i,atomic_t * v)588 raw_atomic_add_return_acquire(int i, atomic_t *v)
589 {
590 #if defined(arch_atomic_add_return_acquire)
591 return arch_atomic_add_return_acquire(i, v);
592 #elif defined(arch_atomic_add_return_relaxed)
593 int ret = arch_atomic_add_return_relaxed(i, v);
594 __atomic_acquire_fence();
595 return ret;
596 #elif defined(arch_atomic_add_return)
597 return arch_atomic_add_return(i, v);
598 #else
599 #error "Unable to define raw_atomic_add_return_acquire"
600 #endif
601 }
602
603 /**
604 * raw_atomic_add_return_release() - atomic add with release ordering
605 * @i: int value to add
606 * @v: pointer to atomic_t
607 *
608 * Atomically updates @v to (@v + @i) with release ordering.
609 *
610 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
611 *
612 * Return: The updated value of @v.
613 */
614 static __always_inline int
raw_atomic_add_return_release(int i,atomic_t * v)615 raw_atomic_add_return_release(int i, atomic_t *v)
616 {
617 #if defined(arch_atomic_add_return_release)
618 return arch_atomic_add_return_release(i, v);
619 #elif defined(arch_atomic_add_return_relaxed)
620 __atomic_release_fence();
621 return arch_atomic_add_return_relaxed(i, v);
622 #elif defined(arch_atomic_add_return)
623 return arch_atomic_add_return(i, v);
624 #else
625 #error "Unable to define raw_atomic_add_return_release"
626 #endif
627 }
628
629 /**
630 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
631 * @i: int value to add
632 * @v: pointer to atomic_t
633 *
634 * Atomically updates @v to (@v + @i) with relaxed ordering.
635 *
636 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
637 *
638 * Return: The updated value of @v.
639 */
640 static __always_inline int
raw_atomic_add_return_relaxed(int i,atomic_t * v)641 raw_atomic_add_return_relaxed(int i, atomic_t *v)
642 {
643 #if defined(arch_atomic_add_return_relaxed)
644 return arch_atomic_add_return_relaxed(i, v);
645 #elif defined(arch_atomic_add_return)
646 return arch_atomic_add_return(i, v);
647 #else
648 #error "Unable to define raw_atomic_add_return_relaxed"
649 #endif
650 }
651
652 /**
653 * raw_atomic_fetch_add() - atomic add with full ordering
654 * @i: int value to add
655 * @v: pointer to atomic_t
656 *
657 * Atomically updates @v to (@v + @i) with full ordering.
658 *
659 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
660 *
661 * Return: The original value of @v.
662 */
663 static __always_inline int
raw_atomic_fetch_add(int i,atomic_t * v)664 raw_atomic_fetch_add(int i, atomic_t *v)
665 {
666 #if defined(arch_atomic_fetch_add)
667 return arch_atomic_fetch_add(i, v);
668 #elif defined(arch_atomic_fetch_add_relaxed)
669 int ret;
670 __atomic_pre_full_fence();
671 ret = arch_atomic_fetch_add_relaxed(i, v);
672 __atomic_post_full_fence();
673 return ret;
674 #else
675 #error "Unable to define raw_atomic_fetch_add"
676 #endif
677 }
678
679 /**
680 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
681 * @i: int value to add
682 * @v: pointer to atomic_t
683 *
684 * Atomically updates @v to (@v + @i) with acquire ordering.
685 *
686 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
687 *
688 * Return: The original value of @v.
689 */
690 static __always_inline int
raw_atomic_fetch_add_acquire(int i,atomic_t * v)691 raw_atomic_fetch_add_acquire(int i, atomic_t *v)
692 {
693 #if defined(arch_atomic_fetch_add_acquire)
694 return arch_atomic_fetch_add_acquire(i, v);
695 #elif defined(arch_atomic_fetch_add_relaxed)
696 int ret = arch_atomic_fetch_add_relaxed(i, v);
697 __atomic_acquire_fence();
698 return ret;
699 #elif defined(arch_atomic_fetch_add)
700 return arch_atomic_fetch_add(i, v);
701 #else
702 #error "Unable to define raw_atomic_fetch_add_acquire"
703 #endif
704 }
705
706 /**
707 * raw_atomic_fetch_add_release() - atomic add with release ordering
708 * @i: int value to add
709 * @v: pointer to atomic_t
710 *
711 * Atomically updates @v to (@v + @i) with release ordering.
712 *
713 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
714 *
715 * Return: The original value of @v.
716 */
717 static __always_inline int
raw_atomic_fetch_add_release(int i,atomic_t * v)718 raw_atomic_fetch_add_release(int i, atomic_t *v)
719 {
720 #if defined(arch_atomic_fetch_add_release)
721 return arch_atomic_fetch_add_release(i, v);
722 #elif defined(arch_atomic_fetch_add_relaxed)
723 __atomic_release_fence();
724 return arch_atomic_fetch_add_relaxed(i, v);
725 #elif defined(arch_atomic_fetch_add)
726 return arch_atomic_fetch_add(i, v);
727 #else
728 #error "Unable to define raw_atomic_fetch_add_release"
729 #endif
730 }
731
732 /**
733 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
734 * @i: int value to add
735 * @v: pointer to atomic_t
736 *
737 * Atomically updates @v to (@v + @i) with relaxed ordering.
738 *
739 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
740 *
741 * Return: The original value of @v.
742 */
743 static __always_inline int
raw_atomic_fetch_add_relaxed(int i,atomic_t * v)744 raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
745 {
746 #if defined(arch_atomic_fetch_add_relaxed)
747 return arch_atomic_fetch_add_relaxed(i, v);
748 #elif defined(arch_atomic_fetch_add)
749 return arch_atomic_fetch_add(i, v);
750 #else
751 #error "Unable to define raw_atomic_fetch_add_relaxed"
752 #endif
753 }
754
755 /**
756 * raw_atomic_sub() - atomic subtract with relaxed ordering
757 * @i: int value to subtract
758 * @v: pointer to atomic_t
759 *
760 * Atomically updates @v to (@v - @i) with relaxed ordering.
761 *
762 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
763 *
764 * Return: Nothing.
765 */
766 static __always_inline void
raw_atomic_sub(int i,atomic_t * v)767 raw_atomic_sub(int i, atomic_t *v)
768 {
769 arch_atomic_sub(i, v);
770 }
771
772 /**
773 * raw_atomic_sub_return() - atomic subtract with full ordering
774 * @i: int value to subtract
775 * @v: pointer to atomic_t
776 *
777 * Atomically updates @v to (@v - @i) with full ordering.
778 *
779 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
780 *
781 * Return: The updated value of @v.
782 */
783 static __always_inline int
raw_atomic_sub_return(int i,atomic_t * v)784 raw_atomic_sub_return(int i, atomic_t *v)
785 {
786 #if defined(arch_atomic_sub_return)
787 return arch_atomic_sub_return(i, v);
788 #elif defined(arch_atomic_sub_return_relaxed)
789 int ret;
790 __atomic_pre_full_fence();
791 ret = arch_atomic_sub_return_relaxed(i, v);
792 __atomic_post_full_fence();
793 return ret;
794 #else
795 #error "Unable to define raw_atomic_sub_return"
796 #endif
797 }
798
799 /**
800 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
801 * @i: int value to subtract
802 * @v: pointer to atomic_t
803 *
804 * Atomically updates @v to (@v - @i) with acquire ordering.
805 *
806 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
807 *
808 * Return: The updated value of @v.
809 */
810 static __always_inline int
raw_atomic_sub_return_acquire(int i,atomic_t * v)811 raw_atomic_sub_return_acquire(int i, atomic_t *v)
812 {
813 #if defined(arch_atomic_sub_return_acquire)
814 return arch_atomic_sub_return_acquire(i, v);
815 #elif defined(arch_atomic_sub_return_relaxed)
816 int ret = arch_atomic_sub_return_relaxed(i, v);
817 __atomic_acquire_fence();
818 return ret;
819 #elif defined(arch_atomic_sub_return)
820 return arch_atomic_sub_return(i, v);
821 #else
822 #error "Unable to define raw_atomic_sub_return_acquire"
823 #endif
824 }
825
826 /**
827 * raw_atomic_sub_return_release() - atomic subtract with release ordering
828 * @i: int value to subtract
829 * @v: pointer to atomic_t
830 *
831 * Atomically updates @v to (@v - @i) with release ordering.
832 *
833 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
834 *
835 * Return: The updated value of @v.
836 */
837 static __always_inline int
raw_atomic_sub_return_release(int i,atomic_t * v)838 raw_atomic_sub_return_release(int i, atomic_t *v)
839 {
840 #if defined(arch_atomic_sub_return_release)
841 return arch_atomic_sub_return_release(i, v);
842 #elif defined(arch_atomic_sub_return_relaxed)
843 __atomic_release_fence();
844 return arch_atomic_sub_return_relaxed(i, v);
845 #elif defined(arch_atomic_sub_return)
846 return arch_atomic_sub_return(i, v);
847 #else
848 #error "Unable to define raw_atomic_sub_return_release"
849 #endif
850 }
851
852 /**
853 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
854 * @i: int value to subtract
855 * @v: pointer to atomic_t
856 *
857 * Atomically updates @v to (@v - @i) with relaxed ordering.
858 *
859 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
860 *
861 * Return: The updated value of @v.
862 */
863 static __always_inline int
raw_atomic_sub_return_relaxed(int i,atomic_t * v)864 raw_atomic_sub_return_relaxed(int i, atomic_t *v)
865 {
866 #if defined(arch_atomic_sub_return_relaxed)
867 return arch_atomic_sub_return_relaxed(i, v);
868 #elif defined(arch_atomic_sub_return)
869 return arch_atomic_sub_return(i, v);
870 #else
871 #error "Unable to define raw_atomic_sub_return_relaxed"
872 #endif
873 }
874
875 /**
876 * raw_atomic_fetch_sub() - atomic subtract with full ordering
877 * @i: int value to subtract
878 * @v: pointer to atomic_t
879 *
880 * Atomically updates @v to (@v - @i) with full ordering.
881 *
882 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
883 *
884 * Return: The original value of @v.
885 */
886 static __always_inline int
raw_atomic_fetch_sub(int i,atomic_t * v)887 raw_atomic_fetch_sub(int i, atomic_t *v)
888 {
889 #if defined(arch_atomic_fetch_sub)
890 return arch_atomic_fetch_sub(i, v);
891 #elif defined(arch_atomic_fetch_sub_relaxed)
892 int ret;
893 __atomic_pre_full_fence();
894 ret = arch_atomic_fetch_sub_relaxed(i, v);
895 __atomic_post_full_fence();
896 return ret;
897 #else
898 #error "Unable to define raw_atomic_fetch_sub"
899 #endif
900 }
901
902 /**
903 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
904 * @i: int value to subtract
905 * @v: pointer to atomic_t
906 *
907 * Atomically updates @v to (@v - @i) with acquire ordering.
908 *
909 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
910 *
911 * Return: The original value of @v.
912 */
913 static __always_inline int
raw_atomic_fetch_sub_acquire(int i,atomic_t * v)914 raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
915 {
916 #if defined(arch_atomic_fetch_sub_acquire)
917 return arch_atomic_fetch_sub_acquire(i, v);
918 #elif defined(arch_atomic_fetch_sub_relaxed)
919 int ret = arch_atomic_fetch_sub_relaxed(i, v);
920 __atomic_acquire_fence();
921 return ret;
922 #elif defined(arch_atomic_fetch_sub)
923 return arch_atomic_fetch_sub(i, v);
924 #else
925 #error "Unable to define raw_atomic_fetch_sub_acquire"
926 #endif
927 }
928
929 /**
930 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
931 * @i: int value to subtract
932 * @v: pointer to atomic_t
933 *
934 * Atomically updates @v to (@v - @i) with release ordering.
935 *
936 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
937 *
938 * Return: The original value of @v.
939 */
940 static __always_inline int
raw_atomic_fetch_sub_release(int i,atomic_t * v)941 raw_atomic_fetch_sub_release(int i, atomic_t *v)
942 {
943 #if defined(arch_atomic_fetch_sub_release)
944 return arch_atomic_fetch_sub_release(i, v);
945 #elif defined(arch_atomic_fetch_sub_relaxed)
946 __atomic_release_fence();
947 return arch_atomic_fetch_sub_relaxed(i, v);
948 #elif defined(arch_atomic_fetch_sub)
949 return arch_atomic_fetch_sub(i, v);
950 #else
951 #error "Unable to define raw_atomic_fetch_sub_release"
952 #endif
953 }
954
955 /**
956 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
957 * @i: int value to subtract
958 * @v: pointer to atomic_t
959 *
960 * Atomically updates @v to (@v - @i) with relaxed ordering.
961 *
962 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
963 *
964 * Return: The original value of @v.
965 */
966 static __always_inline int
raw_atomic_fetch_sub_relaxed(int i,atomic_t * v)967 raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
968 {
969 #if defined(arch_atomic_fetch_sub_relaxed)
970 return arch_atomic_fetch_sub_relaxed(i, v);
971 #elif defined(arch_atomic_fetch_sub)
972 return arch_atomic_fetch_sub(i, v);
973 #else
974 #error "Unable to define raw_atomic_fetch_sub_relaxed"
975 #endif
976 }
977
978 /**
979 * raw_atomic_inc() - atomic increment with relaxed ordering
980 * @v: pointer to atomic_t
981 *
982 * Atomically updates @v to (@v + 1) with relaxed ordering.
983 *
984 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
985 *
986 * Return: Nothing.
987 */
988 static __always_inline void
raw_atomic_inc(atomic_t * v)989 raw_atomic_inc(atomic_t *v)
990 {
991 #if defined(arch_atomic_inc)
992 arch_atomic_inc(v);
993 #else
994 raw_atomic_add(1, v);
995 #endif
996 }
997
998 /**
999 * raw_atomic_inc_return() - atomic increment with full ordering
1000 * @v: pointer to atomic_t
1001 *
1002 * Atomically updates @v to (@v + 1) with full ordering.
1003 *
1004 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
1005 *
1006 * Return: The updated value of @v.
1007 */
1008 static __always_inline int
raw_atomic_inc_return(atomic_t * v)1009 raw_atomic_inc_return(atomic_t *v)
1010 {
1011 #if defined(arch_atomic_inc_return)
1012 return arch_atomic_inc_return(v);
1013 #elif defined(arch_atomic_inc_return_relaxed)
1014 int ret;
1015 __atomic_pre_full_fence();
1016 ret = arch_atomic_inc_return_relaxed(v);
1017 __atomic_post_full_fence();
1018 return ret;
1019 #else
1020 return raw_atomic_add_return(1, v);
1021 #endif
1022 }
1023
1024 /**
1025 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
1026 * @v: pointer to atomic_t
1027 *
1028 * Atomically updates @v to (@v + 1) with acquire ordering.
1029 *
1030 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
1031 *
1032 * Return: The updated value of @v.
1033 */
1034 static __always_inline int
raw_atomic_inc_return_acquire(atomic_t * v)1035 raw_atomic_inc_return_acquire(atomic_t *v)
1036 {
1037 #if defined(arch_atomic_inc_return_acquire)
1038 return arch_atomic_inc_return_acquire(v);
1039 #elif defined(arch_atomic_inc_return_relaxed)
1040 int ret = arch_atomic_inc_return_relaxed(v);
1041 __atomic_acquire_fence();
1042 return ret;
1043 #elif defined(arch_atomic_inc_return)
1044 return arch_atomic_inc_return(v);
1045 #else
1046 return raw_atomic_add_return_acquire(1, v);
1047 #endif
1048 }
1049
1050 /**
1051 * raw_atomic_inc_return_release() - atomic increment with release ordering
1052 * @v: pointer to atomic_t
1053 *
1054 * Atomically updates @v to (@v + 1) with release ordering.
1055 *
1056 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
1057 *
1058 * Return: The updated value of @v.
1059 */
1060 static __always_inline int
raw_atomic_inc_return_release(atomic_t * v)1061 raw_atomic_inc_return_release(atomic_t *v)
1062 {
1063 #if defined(arch_atomic_inc_return_release)
1064 return arch_atomic_inc_return_release(v);
1065 #elif defined(arch_atomic_inc_return_relaxed)
1066 __atomic_release_fence();
1067 return arch_atomic_inc_return_relaxed(v);
1068 #elif defined(arch_atomic_inc_return)
1069 return arch_atomic_inc_return(v);
1070 #else
1071 return raw_atomic_add_return_release(1, v);
1072 #endif
1073 }
1074
1075 /**
1076 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
1077 * @v: pointer to atomic_t
1078 *
1079 * Atomically updates @v to (@v + 1) with relaxed ordering.
1080 *
1081 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
1082 *
1083 * Return: The updated value of @v.
1084 */
1085 static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t * v)1086 raw_atomic_inc_return_relaxed(atomic_t *v)
1087 {
1088 #if defined(arch_atomic_inc_return_relaxed)
1089 return arch_atomic_inc_return_relaxed(v);
1090 #elif defined(arch_atomic_inc_return)
1091 return arch_atomic_inc_return(v);
1092 #else
1093 return raw_atomic_add_return_relaxed(1, v);
1094 #endif
1095 }
1096
1097 /**
1098 * raw_atomic_fetch_inc() - atomic increment with full ordering
1099 * @v: pointer to atomic_t
1100 *
1101 * Atomically updates @v to (@v + 1) with full ordering.
1102 *
1103 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
1104 *
1105 * Return: The original value of @v.
1106 */
1107 static __always_inline int
raw_atomic_fetch_inc(atomic_t * v)1108 raw_atomic_fetch_inc(atomic_t *v)
1109 {
1110 #if defined(arch_atomic_fetch_inc)
1111 return arch_atomic_fetch_inc(v);
1112 #elif defined(arch_atomic_fetch_inc_relaxed)
1113 int ret;
1114 __atomic_pre_full_fence();
1115 ret = arch_atomic_fetch_inc_relaxed(v);
1116 __atomic_post_full_fence();
1117 return ret;
1118 #else
1119 return raw_atomic_fetch_add(1, v);
1120 #endif
1121 }
1122
1123 /**
1124 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
1125 * @v: pointer to atomic_t
1126 *
1127 * Atomically updates @v to (@v + 1) with acquire ordering.
1128 *
1129 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
1130 *
1131 * Return: The original value of @v.
1132 */
1133 static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t * v)1134 raw_atomic_fetch_inc_acquire(atomic_t *v)
1135 {
1136 #if defined(arch_atomic_fetch_inc_acquire)
1137 return arch_atomic_fetch_inc_acquire(v);
1138 #elif defined(arch_atomic_fetch_inc_relaxed)
1139 int ret = arch_atomic_fetch_inc_relaxed(v);
1140 __atomic_acquire_fence();
1141 return ret;
1142 #elif defined(arch_atomic_fetch_inc)
1143 return arch_atomic_fetch_inc(v);
1144 #else
1145 return raw_atomic_fetch_add_acquire(1, v);
1146 #endif
1147 }
1148
1149 /**
1150 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
1151 * @v: pointer to atomic_t
1152 *
1153 * Atomically updates @v to (@v + 1) with release ordering.
1154 *
1155 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
1156 *
1157 * Return: The original value of @v.
1158 */
1159 static __always_inline int
raw_atomic_fetch_inc_release(atomic_t * v)1160 raw_atomic_fetch_inc_release(atomic_t *v)
1161 {
1162 #if defined(arch_atomic_fetch_inc_release)
1163 return arch_atomic_fetch_inc_release(v);
1164 #elif defined(arch_atomic_fetch_inc_relaxed)
1165 __atomic_release_fence();
1166 return arch_atomic_fetch_inc_relaxed(v);
1167 #elif defined(arch_atomic_fetch_inc)
1168 return arch_atomic_fetch_inc(v);
1169 #else
1170 return raw_atomic_fetch_add_release(1, v);
1171 #endif
1172 }
1173
1174 /**
1175 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
1176 * @v: pointer to atomic_t
1177 *
1178 * Atomically updates @v to (@v + 1) with relaxed ordering.
1179 *
1180 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
1181 *
1182 * Return: The original value of @v.
1183 */
1184 static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t * v)1185 raw_atomic_fetch_inc_relaxed(atomic_t *v)
1186 {
1187 #if defined(arch_atomic_fetch_inc_relaxed)
1188 return arch_atomic_fetch_inc_relaxed(v);
1189 #elif defined(arch_atomic_fetch_inc)
1190 return arch_atomic_fetch_inc(v);
1191 #else
1192 return raw_atomic_fetch_add_relaxed(1, v);
1193 #endif
1194 }
1195
1196 /**
1197 * raw_atomic_dec() - atomic decrement with relaxed ordering
1198 * @v: pointer to atomic_t
1199 *
1200 * Atomically updates @v to (@v - 1) with relaxed ordering.
1201 *
1202 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
1203 *
1204 * Return: Nothing.
1205 */
1206 static __always_inline void
raw_atomic_dec(atomic_t * v)1207 raw_atomic_dec(atomic_t *v)
1208 {
1209 #if defined(arch_atomic_dec)
1210 arch_atomic_dec(v);
1211 #else
1212 raw_atomic_sub(1, v);
1213 #endif
1214 }
1215
1216 /**
1217 * raw_atomic_dec_return() - atomic decrement with full ordering
1218 * @v: pointer to atomic_t
1219 *
1220 * Atomically updates @v to (@v - 1) with full ordering.
1221 *
1222 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
1223 *
1224 * Return: The updated value of @v.
1225 */
1226 static __always_inline int
raw_atomic_dec_return(atomic_t * v)1227 raw_atomic_dec_return(atomic_t *v)
1228 {
1229 #if defined(arch_atomic_dec_return)
1230 return arch_atomic_dec_return(v);
1231 #elif defined(arch_atomic_dec_return_relaxed)
1232 int ret;
1233 __atomic_pre_full_fence();
1234 ret = arch_atomic_dec_return_relaxed(v);
1235 __atomic_post_full_fence();
1236 return ret;
1237 #else
1238 return raw_atomic_sub_return(1, v);
1239 #endif
1240 }
1241
1242 /**
1243 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
1244 * @v: pointer to atomic_t
1245 *
1246 * Atomically updates @v to (@v - 1) with acquire ordering.
1247 *
1248 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
1249 *
1250 * Return: The updated value of @v.
1251 */
1252 static __always_inline int
raw_atomic_dec_return_acquire(atomic_t * v)1253 raw_atomic_dec_return_acquire(atomic_t *v)
1254 {
1255 #if defined(arch_atomic_dec_return_acquire)
1256 return arch_atomic_dec_return_acquire(v);
1257 #elif defined(arch_atomic_dec_return_relaxed)
1258 int ret = arch_atomic_dec_return_relaxed(v);
1259 __atomic_acquire_fence();
1260 return ret;
1261 #elif defined(arch_atomic_dec_return)
1262 return arch_atomic_dec_return(v);
1263 #else
1264 return raw_atomic_sub_return_acquire(1, v);
1265 #endif
1266 }
1267
1268 /**
1269 * raw_atomic_dec_return_release() - atomic decrement with release ordering
1270 * @v: pointer to atomic_t
1271 *
1272 * Atomically updates @v to (@v - 1) with release ordering.
1273 *
1274 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
1275 *
1276 * Return: The updated value of @v.
1277 */
1278 static __always_inline int
raw_atomic_dec_return_release(atomic_t * v)1279 raw_atomic_dec_return_release(atomic_t *v)
1280 {
1281 #if defined(arch_atomic_dec_return_release)
1282 return arch_atomic_dec_return_release(v);
1283 #elif defined(arch_atomic_dec_return_relaxed)
1284 __atomic_release_fence();
1285 return arch_atomic_dec_return_relaxed(v);
1286 #elif defined(arch_atomic_dec_return)
1287 return arch_atomic_dec_return(v);
1288 #else
1289 return raw_atomic_sub_return_release(1, v);
1290 #endif
1291 }
1292
1293 /**
1294 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
1295 * @v: pointer to atomic_t
1296 *
1297 * Atomically updates @v to (@v - 1) with relaxed ordering.
1298 *
1299 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
1300 *
1301 * Return: The updated value of @v.
1302 */
1303 static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t * v)1304 raw_atomic_dec_return_relaxed(atomic_t *v)
1305 {
1306 #if defined(arch_atomic_dec_return_relaxed)
1307 return arch_atomic_dec_return_relaxed(v);
1308 #elif defined(arch_atomic_dec_return)
1309 return arch_atomic_dec_return(v);
1310 #else
1311 return raw_atomic_sub_return_relaxed(1, v);
1312 #endif
1313 }
1314
1315 /**
1316 * raw_atomic_fetch_dec() - atomic decrement with full ordering
1317 * @v: pointer to atomic_t
1318 *
1319 * Atomically updates @v to (@v - 1) with full ordering.
1320 *
1321 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
1322 *
1323 * Return: The original value of @v.
1324 */
1325 static __always_inline int
raw_atomic_fetch_dec(atomic_t * v)1326 raw_atomic_fetch_dec(atomic_t *v)
1327 {
1328 #if defined(arch_atomic_fetch_dec)
1329 return arch_atomic_fetch_dec(v);
1330 #elif defined(arch_atomic_fetch_dec_relaxed)
1331 int ret;
1332 __atomic_pre_full_fence();
1333 ret = arch_atomic_fetch_dec_relaxed(v);
1334 __atomic_post_full_fence();
1335 return ret;
1336 #else
1337 return raw_atomic_fetch_sub(1, v);
1338 #endif
1339 }
1340
1341 /**
1342 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
1343 * @v: pointer to atomic_t
1344 *
1345 * Atomically updates @v to (@v - 1) with acquire ordering.
1346 *
1347 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
1348 *
1349 * Return: The original value of @v.
1350 */
1351 static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t * v)1352 raw_atomic_fetch_dec_acquire(atomic_t *v)
1353 {
1354 #if defined(arch_atomic_fetch_dec_acquire)
1355 return arch_atomic_fetch_dec_acquire(v);
1356 #elif defined(arch_atomic_fetch_dec_relaxed)
1357 int ret = arch_atomic_fetch_dec_relaxed(v);
1358 __atomic_acquire_fence();
1359 return ret;
1360 #elif defined(arch_atomic_fetch_dec)
1361 return arch_atomic_fetch_dec(v);
1362 #else
1363 return raw_atomic_fetch_sub_acquire(1, v);
1364 #endif
1365 }
1366
1367 /**
1368 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
1369 * @v: pointer to atomic_t
1370 *
1371 * Atomically updates @v to (@v - 1) with release ordering.
1372 *
1373 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
1374 *
1375 * Return: The original value of @v.
1376 */
1377 static __always_inline int
raw_atomic_fetch_dec_release(atomic_t * v)1378 raw_atomic_fetch_dec_release(atomic_t *v)
1379 {
1380 #if defined(arch_atomic_fetch_dec_release)
1381 return arch_atomic_fetch_dec_release(v);
1382 #elif defined(arch_atomic_fetch_dec_relaxed)
1383 __atomic_release_fence();
1384 return arch_atomic_fetch_dec_relaxed(v);
1385 #elif defined(arch_atomic_fetch_dec)
1386 return arch_atomic_fetch_dec(v);
1387 #else
1388 return raw_atomic_fetch_sub_release(1, v);
1389 #endif
1390 }
1391
1392 /**
1393 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
1394 * @v: pointer to atomic_t
1395 *
1396 * Atomically updates @v to (@v - 1) with relaxed ordering.
1397 *
1398 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
1399 *
1400 * Return: The original value of @v.
1401 */
1402 static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t * v)1403 raw_atomic_fetch_dec_relaxed(atomic_t *v)
1404 {
1405 #if defined(arch_atomic_fetch_dec_relaxed)
1406 return arch_atomic_fetch_dec_relaxed(v);
1407 #elif defined(arch_atomic_fetch_dec)
1408 return arch_atomic_fetch_dec(v);
1409 #else
1410 return raw_atomic_fetch_sub_relaxed(1, v);
1411 #endif
1412 }
1413
1414 /**
1415 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
1416 * @i: int value
1417 * @v: pointer to atomic_t
1418 *
1419 * Atomically updates @v to (@v & @i) with relaxed ordering.
1420 *
1421 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
1422 *
1423 * Return: Nothing.
1424 */
1425 static __always_inline void
raw_atomic_and(int i,atomic_t * v)1426 raw_atomic_and(int i, atomic_t *v)
1427 {
1428 arch_atomic_and(i, v);
1429 }
1430
1431 /**
1432 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
1433 * @i: int value
1434 * @v: pointer to atomic_t
1435 *
1436 * Atomically updates @v to (@v & @i) with full ordering.
1437 *
1438 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
1439 *
1440 * Return: The original value of @v.
1441 */
1442 static __always_inline int
raw_atomic_fetch_and(int i,atomic_t * v)1443 raw_atomic_fetch_and(int i, atomic_t *v)
1444 {
1445 #if defined(arch_atomic_fetch_and)
1446 return arch_atomic_fetch_and(i, v);
1447 #elif defined(arch_atomic_fetch_and_relaxed)
1448 int ret;
1449 __atomic_pre_full_fence();
1450 ret = arch_atomic_fetch_and_relaxed(i, v);
1451 __atomic_post_full_fence();
1452 return ret;
1453 #else
1454 #error "Unable to define raw_atomic_fetch_and"
1455 #endif
1456 }
1457
1458 /**
1459 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
1460 * @i: int value
1461 * @v: pointer to atomic_t
1462 *
1463 * Atomically updates @v to (@v & @i) with acquire ordering.
1464 *
1465 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
1466 *
1467 * Return: The original value of @v.
1468 */
1469 static __always_inline int
raw_atomic_fetch_and_acquire(int i,atomic_t * v)1470 raw_atomic_fetch_and_acquire(int i, atomic_t *v)
1471 {
1472 #if defined(arch_atomic_fetch_and_acquire)
1473 return arch_atomic_fetch_and_acquire(i, v);
1474 #elif defined(arch_atomic_fetch_and_relaxed)
1475 int ret = arch_atomic_fetch_and_relaxed(i, v);
1476 __atomic_acquire_fence();
1477 return ret;
1478 #elif defined(arch_atomic_fetch_and)
1479 return arch_atomic_fetch_and(i, v);
1480 #else
1481 #error "Unable to define raw_atomic_fetch_and_acquire"
1482 #endif
1483 }
1484
1485 /**
1486 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
1487 * @i: int value
1488 * @v: pointer to atomic_t
1489 *
1490 * Atomically updates @v to (@v & @i) with release ordering.
1491 *
1492 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
1493 *
1494 * Return: The original value of @v.
1495 */
1496 static __always_inline int
raw_atomic_fetch_and_release(int i,atomic_t * v)1497 raw_atomic_fetch_and_release(int i, atomic_t *v)
1498 {
1499 #if defined(arch_atomic_fetch_and_release)
1500 return arch_atomic_fetch_and_release(i, v);
1501 #elif defined(arch_atomic_fetch_and_relaxed)
1502 __atomic_release_fence();
1503 return arch_atomic_fetch_and_relaxed(i, v);
1504 #elif defined(arch_atomic_fetch_and)
1505 return arch_atomic_fetch_and(i, v);
1506 #else
1507 #error "Unable to define raw_atomic_fetch_and_release"
1508 #endif
1509 }
1510
1511 /**
1512 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
1513 * @i: int value
1514 * @v: pointer to atomic_t
1515 *
1516 * Atomically updates @v to (@v & @i) with relaxed ordering.
1517 *
1518 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
1519 *
1520 * Return: The original value of @v.
1521 */
1522 static __always_inline int
raw_atomic_fetch_and_relaxed(int i,atomic_t * v)1523 raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
1524 {
1525 #if defined(arch_atomic_fetch_and_relaxed)
1526 return arch_atomic_fetch_and_relaxed(i, v);
1527 #elif defined(arch_atomic_fetch_and)
1528 return arch_atomic_fetch_and(i, v);
1529 #else
1530 #error "Unable to define raw_atomic_fetch_and_relaxed"
1531 #endif
1532 }
1533
1534 /**
1535 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
1536 * @i: int value
1537 * @v: pointer to atomic_t
1538 *
1539 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1540 *
1541 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
1542 *
1543 * Return: Nothing.
1544 */
1545 static __always_inline void
raw_atomic_andnot(int i,atomic_t * v)1546 raw_atomic_andnot(int i, atomic_t *v)
1547 {
1548 #if defined(arch_atomic_andnot)
1549 arch_atomic_andnot(i, v);
1550 #else
1551 raw_atomic_and(~i, v);
1552 #endif
1553 }
1554
1555 /**
1556 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
1557 * @i: int value
1558 * @v: pointer to atomic_t
1559 *
1560 * Atomically updates @v to (@v & ~@i) with full ordering.
1561 *
1562 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
1563 *
1564 * Return: The original value of @v.
1565 */
1566 static __always_inline int
raw_atomic_fetch_andnot(int i,atomic_t * v)1567 raw_atomic_fetch_andnot(int i, atomic_t *v)
1568 {
1569 #if defined(arch_atomic_fetch_andnot)
1570 return arch_atomic_fetch_andnot(i, v);
1571 #elif defined(arch_atomic_fetch_andnot_relaxed)
1572 int ret;
1573 __atomic_pre_full_fence();
1574 ret = arch_atomic_fetch_andnot_relaxed(i, v);
1575 __atomic_post_full_fence();
1576 return ret;
1577 #else
1578 return raw_atomic_fetch_and(~i, v);
1579 #endif
1580 }
1581
1582 /**
1583 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
1584 * @i: int value
1585 * @v: pointer to atomic_t
1586 *
1587 * Atomically updates @v to (@v & ~@i) with acquire ordering.
1588 *
1589 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
1590 *
1591 * Return: The original value of @v.
1592 */
1593 static __always_inline int
raw_atomic_fetch_andnot_acquire(int i,atomic_t * v)1594 raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595 {
1596 #if defined(arch_atomic_fetch_andnot_acquire)
1597 return arch_atomic_fetch_andnot_acquire(i, v);
1598 #elif defined(arch_atomic_fetch_andnot_relaxed)
1599 int ret = arch_atomic_fetch_andnot_relaxed(i, v);
1600 __atomic_acquire_fence();
1601 return ret;
1602 #elif defined(arch_atomic_fetch_andnot)
1603 return arch_atomic_fetch_andnot(i, v);
1604 #else
1605 return raw_atomic_fetch_and_acquire(~i, v);
1606 #endif
1607 }
1608
1609 /**
1610 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
1611 * @i: int value
1612 * @v: pointer to atomic_t
1613 *
1614 * Atomically updates @v to (@v & ~@i) with release ordering.
1615 *
1616 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
1617 *
1618 * Return: The original value of @v.
1619 */
1620 static __always_inline int
raw_atomic_fetch_andnot_release(int i,atomic_t * v)1621 raw_atomic_fetch_andnot_release(int i, atomic_t *v)
1622 {
1623 #if defined(arch_atomic_fetch_andnot_release)
1624 return arch_atomic_fetch_andnot_release(i, v);
1625 #elif defined(arch_atomic_fetch_andnot_relaxed)
1626 __atomic_release_fence();
1627 return arch_atomic_fetch_andnot_relaxed(i, v);
1628 #elif defined(arch_atomic_fetch_andnot)
1629 return arch_atomic_fetch_andnot(i, v);
1630 #else
1631 return raw_atomic_fetch_and_release(~i, v);
1632 #endif
1633 }
1634
1635 /**
1636 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
1637 * @i: int value
1638 * @v: pointer to atomic_t
1639 *
1640 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1641 *
1642 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
1643 *
1644 * Return: The original value of @v.
1645 */
1646 static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i,atomic_t * v)1647 raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
1648 {
1649 #if defined(arch_atomic_fetch_andnot_relaxed)
1650 return arch_atomic_fetch_andnot_relaxed(i, v);
1651 #elif defined(arch_atomic_fetch_andnot)
1652 return arch_atomic_fetch_andnot(i, v);
1653 #else
1654 return raw_atomic_fetch_and_relaxed(~i, v);
1655 #endif
1656 }
1657
1658 /**
1659 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
1660 * @i: int value
1661 * @v: pointer to atomic_t
1662 *
1663 * Atomically updates @v to (@v | @i) with relaxed ordering.
1664 *
1665 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
1666 *
1667 * Return: Nothing.
1668 */
1669 static __always_inline void
raw_atomic_or(int i,atomic_t * v)1670 raw_atomic_or(int i, atomic_t *v)
1671 {
1672 arch_atomic_or(i, v);
1673 }
1674
1675 /**
1676 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
1677 * @i: int value
1678 * @v: pointer to atomic_t
1679 *
1680 * Atomically updates @v to (@v | @i) with full ordering.
1681 *
1682 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
1683 *
1684 * Return: The original value of @v.
1685 */
1686 static __always_inline int
raw_atomic_fetch_or(int i,atomic_t * v)1687 raw_atomic_fetch_or(int i, atomic_t *v)
1688 {
1689 #if defined(arch_atomic_fetch_or)
1690 return arch_atomic_fetch_or(i, v);
1691 #elif defined(arch_atomic_fetch_or_relaxed)
1692 int ret;
1693 __atomic_pre_full_fence();
1694 ret = arch_atomic_fetch_or_relaxed(i, v);
1695 __atomic_post_full_fence();
1696 return ret;
1697 #else
1698 #error "Unable to define raw_atomic_fetch_or"
1699 #endif
1700 }
1701
1702 /**
1703 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
1704 * @i: int value
1705 * @v: pointer to atomic_t
1706 *
1707 * Atomically updates @v to (@v | @i) with acquire ordering.
1708 *
1709 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
1710 *
1711 * Return: The original value of @v.
1712 */
1713 static __always_inline int
raw_atomic_fetch_or_acquire(int i,atomic_t * v)1714 raw_atomic_fetch_or_acquire(int i, atomic_t *v)
1715 {
1716 #if defined(arch_atomic_fetch_or_acquire)
1717 return arch_atomic_fetch_or_acquire(i, v);
1718 #elif defined(arch_atomic_fetch_or_relaxed)
1719 int ret = arch_atomic_fetch_or_relaxed(i, v);
1720 __atomic_acquire_fence();
1721 return ret;
1722 #elif defined(arch_atomic_fetch_or)
1723 return arch_atomic_fetch_or(i, v);
1724 #else
1725 #error "Unable to define raw_atomic_fetch_or_acquire"
1726 #endif
1727 }
1728
1729 /**
1730 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
1731 * @i: int value
1732 * @v: pointer to atomic_t
1733 *
1734 * Atomically updates @v to (@v | @i) with release ordering.
1735 *
1736 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
1737 *
1738 * Return: The original value of @v.
1739 */
1740 static __always_inline int
raw_atomic_fetch_or_release(int i,atomic_t * v)1741 raw_atomic_fetch_or_release(int i, atomic_t *v)
1742 {
1743 #if defined(arch_atomic_fetch_or_release)
1744 return arch_atomic_fetch_or_release(i, v);
1745 #elif defined(arch_atomic_fetch_or_relaxed)
1746 __atomic_release_fence();
1747 return arch_atomic_fetch_or_relaxed(i, v);
1748 #elif defined(arch_atomic_fetch_or)
1749 return arch_atomic_fetch_or(i, v);
1750 #else
1751 #error "Unable to define raw_atomic_fetch_or_release"
1752 #endif
1753 }
1754
1755 /**
1756 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
1757 * @i: int value
1758 * @v: pointer to atomic_t
1759 *
1760 * Atomically updates @v to (@v | @i) with relaxed ordering.
1761 *
1762 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
1763 *
1764 * Return: The original value of @v.
1765 */
1766 static __always_inline int
1767 raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
1768 {
1769 #if defined(arch_atomic_fetch_or_relaxed)
1770 return arch_atomic_fetch_or_relaxed(i, v);
1771 #elif defined(arch_atomic_fetch_or)
1772 return arch_atomic_fetch_or(i, v);
1773 #else
1774 #error "Unable to define raw_atomic_fetch_or_relaxed"
1775 #endif
1776 }
1777
1778 /**
1779 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
1780 * @i: int value
1781 * @v: pointer to atomic_t
1782 *
1783 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1784 *
1785 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
1786 *
1787 * Return: Nothing.
1788 */
1789 static __always_inline void
1790 raw_atomic_xor(int i, atomic_t *v)
1791 {
1792 arch_atomic_xor(i, v);
1793 }
1794
1795 /**
1796 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
1797 * @i: int value
1798 * @v: pointer to atomic_t
1799 *
1800 * Atomically updates @v to (@v ^ @i) with full ordering.
1801 *
1802 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
1803 *
1804 * Return: The original value of @v.
1805 */
1806 static __always_inline int
1807 raw_atomic_fetch_xor(int i, atomic_t *v)
1808 {
1809 #if defined(arch_atomic_fetch_xor)
1810 return arch_atomic_fetch_xor(i, v);
1811 #elif defined(arch_atomic_fetch_xor_relaxed)
1812 int ret;
1813 __atomic_pre_full_fence();
1814 ret = arch_atomic_fetch_xor_relaxed(i, v);
1815 __atomic_post_full_fence();
1816 return ret;
1817 #else
1818 #error "Unable to define raw_atomic_fetch_xor"
1819 #endif
1820 }
1821
1822 /**
1823 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
1824 * @i: int value
1825 * @v: pointer to atomic_t
1826 *
1827 * Atomically updates @v to (@v ^ @i) with acquire ordering.
1828 *
1829 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
1830 *
1831 * Return: The original value of @v.
1832 */
1833 static __always_inline int
1834 raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
1835 {
1836 #if defined(arch_atomic_fetch_xor_acquire)
1837 return arch_atomic_fetch_xor_acquire(i, v);
1838 #elif defined(arch_atomic_fetch_xor_relaxed)
1839 int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840 __atomic_acquire_fence();
1841 return ret;
1842 #elif defined(arch_atomic_fetch_xor)
1843 return arch_atomic_fetch_xor(i, v);
1844 #else
1845 #error "Unable to define raw_atomic_fetch_xor_acquire"
1846 #endif
1847 }
1848
1849 /**
1850 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
1851 * @i: int value
1852 * @v: pointer to atomic_t
1853 *
1854 * Atomically updates @v to (@v ^ @i) with release ordering.
1855 *
1856 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
1857 *
1858 * Return: The original value of @v.
1859 */
1860 static __always_inline int
1861 raw_atomic_fetch_xor_release(int i, atomic_t *v)
1862 {
1863 #if defined(arch_atomic_fetch_xor_release)
1864 return arch_atomic_fetch_xor_release(i, v);
1865 #elif defined(arch_atomic_fetch_xor_relaxed)
1866 __atomic_release_fence();
1867 return arch_atomic_fetch_xor_relaxed(i, v);
1868 #elif defined(arch_atomic_fetch_xor)
1869 return arch_atomic_fetch_xor(i, v);
1870 #else
1871 #error "Unable to define raw_atomic_fetch_xor_release"
1872 #endif
1873 }
1874
1875 /**
1876 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
1877 * @i: int value
1878 * @v: pointer to atomic_t
1879 *
1880 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1881 *
1882 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
1883 *
1884 * Return: The original value of @v.
1885 */
1886 static __always_inline int
1887 raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
1888 {
1889 #if defined(arch_atomic_fetch_xor_relaxed)
1890 return arch_atomic_fetch_xor_relaxed(i, v);
1891 #elif defined(arch_atomic_fetch_xor)
1892 return arch_atomic_fetch_xor(i, v);
1893 #else
1894 #error "Unable to define raw_atomic_fetch_xor_relaxed"
1895 #endif
1896 }
1897
1898 /**
1899 * raw_atomic_xchg() - atomic exchange with full ordering
1900 * @v: pointer to atomic_t
1901 * @new: int value to assign
1902 *
1903 * Atomically updates @v to @new with full ordering.
1904 *
1905 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
1906 *
1907 * Return: The original value of @v.
1908 */
1909 static __always_inline int
1910 raw_atomic_xchg(atomic_t *v, int new)
1911 {
1912 #if defined(arch_atomic_xchg)
1913 return arch_atomic_xchg(v, new);
1914 #elif defined(arch_atomic_xchg_relaxed)
1915 int ret;
1916 __atomic_pre_full_fence();
1917 ret = arch_atomic_xchg_relaxed(v, new);
1918 __atomic_post_full_fence();
1919 return ret;
1920 #else
1921 return raw_xchg(&v->counter, new);
1922 #endif
1923 }
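
/*
 * Illustrative sketch, not part of the generated API: raw_atomic_xchg()
 * installs a new value and hands back the displaced one, which is enough to
 * implement a simple single-owner claim. The helper and the "0 means free"
 * convention are hypothetical.
 */
static __always_inline bool example_claim(atomic_t *owner, int me)
{
	/* Fully ordered, so the claim acts as a full barrier for the winner. */
	return raw_atomic_xchg(owner, me) == 0;
}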
1924
1925 /**
1926 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
1927 * @v: pointer to atomic_t
1928 * @new: int value to assign
1929 *
1930 * Atomically updates @v to @new with acquire ordering.
1931 *
1932 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
1933 *
1934 * Return: The original value of @v.
1935 */
1936 static __always_inline int
1937 raw_atomic_xchg_acquire(atomic_t *v, int new)
1938 {
1939 #if defined(arch_atomic_xchg_acquire)
1940 return arch_atomic_xchg_acquire(v, new);
1941 #elif defined(arch_atomic_xchg_relaxed)
1942 int ret = arch_atomic_xchg_relaxed(v, new);
1943 __atomic_acquire_fence();
1944 return ret;
1945 #elif defined(arch_atomic_xchg)
1946 return arch_atomic_xchg(v, new);
1947 #else
1948 return raw_xchg_acquire(&v->counter, new);
1949 #endif
1950 }
1951
1952 /**
1953 * raw_atomic_xchg_release() - atomic exchange with release ordering
1954 * @v: pointer to atomic_t
1955 * @new: int value to assign
1956 *
1957 * Atomically updates @v to @new with release ordering.
1958 *
1959 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
1960 *
1961 * Return: The original value of @v.
1962 */
1963 static __always_inline int
1964 raw_atomic_xchg_release(atomic_t *v, int new)
1965 {
1966 #if defined(arch_atomic_xchg_release)
1967 return arch_atomic_xchg_release(v, new);
1968 #elif defined(arch_atomic_xchg_relaxed)
1969 __atomic_release_fence();
1970 return arch_atomic_xchg_relaxed(v, new);
1971 #elif defined(arch_atomic_xchg)
1972 return arch_atomic_xchg(v, new);
1973 #else
1974 return raw_xchg_release(&v->counter, new);
1975 #endif
1976 }
1977
1978 /**
1979 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
1980 * @v: pointer to atomic_t
1981 * @new: int value to assign
1982 *
1983 * Atomically updates @v to @new with relaxed ordering.
1984 *
1985 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
1986 *
1987 * Return: The original value of @v.
1988 */
1989 static __always_inline int
1990 raw_atomic_xchg_relaxed(atomic_t *v, int new)
1991 {
1992 #if defined(arch_atomic_xchg_relaxed)
1993 return arch_atomic_xchg_relaxed(v, new);
1994 #elif defined(arch_atomic_xchg)
1995 return arch_atomic_xchg(v, new);
1996 #else
1997 return raw_xchg_relaxed(&v->counter, new);
1998 #endif
1999 }
2000
2001 /**
2002 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
2003 * @v: pointer to atomic_t
2004 * @old: int value to compare with
2005 * @new: int value to assign
2006 *
2007 * If (@v == @old), atomically updates @v to @new with full ordering.
2008 *
2009 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
2010 *
2011 * Return: The original value of @v.
2012 */
2013 static __always_inline int
2014 raw_atomic_cmpxchg(atomic_t *v, int old, int new)
2015 {
2016 #if defined(arch_atomic_cmpxchg)
2017 return arch_atomic_cmpxchg(v, old, new);
2018 #elif defined(arch_atomic_cmpxchg_relaxed)
2019 int ret;
2020 __atomic_pre_full_fence();
2021 ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2022 __atomic_post_full_fence();
2023 return ret;
2024 #else
2025 return raw_cmpxchg(&v->counter, old, new);
2026 #endif
2027 }
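
/*
 * Illustrative sketch, not part of the generated API: raw_atomic_cmpxchg()
 * signals success only implicitly, by returning a value equal to @old. The
 * state-machine helper below is hypothetical.
 */
static __always_inline bool example_transition(atomic_t *state, int from, int to)
{
	return raw_atomic_cmpxchg(state, from, to) == from;
}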
2028
2029 /**
2030 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2031 * @v: pointer to atomic_t
2032 * @old: int value to compare with
2033 * @new: int value to assign
2034 *
2035 * If (@v == @old), atomically updates @v to @new with acquire ordering.
2036 *
2037 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2038 *
2039 * Return: The original value of @v.
2040 */
2041 static __always_inline int
2042 raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2043 {
2044 #if defined(arch_atomic_cmpxchg_acquire)
2045 return arch_atomic_cmpxchg_acquire(v, old, new);
2046 #elif defined(arch_atomic_cmpxchg_relaxed)
2047 int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2048 __atomic_acquire_fence();
2049 return ret;
2050 #elif defined(arch_atomic_cmpxchg)
2051 return arch_atomic_cmpxchg(v, old, new);
2052 #else
2053 return raw_cmpxchg_acquire(&v->counter, old, new);
2054 #endif
2055 }
2056
2057 /**
2058 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2059 * @v: pointer to atomic_t
2060 * @old: int value to compare with
2061 * @new: int value to assign
2062 *
2063 * If (@v == @old), atomically updates @v to @new with release ordering.
2064 *
2065 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2066 *
2067 * Return: The original value of @v.
2068 */
2069 static __always_inline int
2070 raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2071 {
2072 #if defined(arch_atomic_cmpxchg_release)
2073 return arch_atomic_cmpxchg_release(v, old, new);
2074 #elif defined(arch_atomic_cmpxchg_relaxed)
2075 __atomic_release_fence();
2076 return arch_atomic_cmpxchg_relaxed(v, old, new);
2077 #elif defined(arch_atomic_cmpxchg)
2078 return arch_atomic_cmpxchg(v, old, new);
2079 #else
2080 return raw_cmpxchg_release(&v->counter, old, new);
2081 #endif
2082 }
2083
2084 /**
2085 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2086 * @v: pointer to atomic_t
2087 * @old: int value to compare with
2088 * @new: int value to assign
2089 *
2090 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2091 *
2092 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2093 *
2094 * Return: The original value of @v.
2095 */
2096 static __always_inline int
2097 raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
2098 {
2099 #if defined(arch_atomic_cmpxchg_relaxed)
2100 return arch_atomic_cmpxchg_relaxed(v, old, new);
2101 #elif defined(arch_atomic_cmpxchg)
2102 return arch_atomic_cmpxchg(v, old, new);
2103 #else
2104 return raw_cmpxchg_relaxed(&v->counter, old, new);
2105 #endif
2106 }
2107
2108 /**
2109 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2110 * @v: pointer to atomic_t
2111 * @old: pointer to int value to compare with
2112 * @new: int value to assign
2113 *
2114 * If (@v == @old), atomically updates @v to @new with full ordering.
2115 * Otherwise, updates @old to the current value of @v.
2116 *
2117 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2118 *
2119 * Return: @true if the exchange occurred, @false otherwise.
2120 */
2121 static __always_inline bool
2122 raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2123 {
2124 #if defined(arch_atomic_try_cmpxchg)
2125 return arch_atomic_try_cmpxchg(v, old, new);
2126 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2127 bool ret;
2128 __atomic_pre_full_fence();
2129 ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2130 __atomic_post_full_fence();
2131 return ret;
2132 #else
2133 int r, o = *old;
2134 r = raw_atomic_cmpxchg(v, o, new);
2135 if (unlikely(r != o))
2136 *old = r;
2137 return likely(r == o);
2138 #endif
2139 }
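
/*
 * Illustrative sketch, not part of the generated API: because
 * raw_atomic_try_cmpxchg() rewrites *@old on failure, a retry loop does not
 * have to re-read @v by hand. The capped-increment helper is hypothetical.
 */
static __always_inline void example_inc_capped(atomic_t *v, int cap)
{
	int cur = raw_atomic_read(v);

	do {
		if (cur >= cap)
			return;
	} while (!raw_atomic_try_cmpxchg(v, &cur, cur + 1));
}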
2140
2141 /**
2142 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2143 * @v: pointer to atomic_t
2144 * @old: pointer to int value to compare with
2145 * @new: int value to assign
2146 *
2147 * If (@v == @old), atomically updates @v to @new with acquire ordering.
2148 * Otherwise, updates @old to the current value of @v.
2149 *
2150 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
2151 *
2152 * Return: @true if the exchange occurred, @false otherwise.
2153 */
2154 static __always_inline bool
2155 raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2156 {
2157 #if defined(arch_atomic_try_cmpxchg_acquire)
2158 return arch_atomic_try_cmpxchg_acquire(v, old, new);
2159 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2160 bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2161 __atomic_acquire_fence();
2162 return ret;
2163 #elif defined(arch_atomic_try_cmpxchg)
2164 return arch_atomic_try_cmpxchg(v, old, new);
2165 #else
2166 int r, o = *old;
2167 r = raw_atomic_cmpxchg_acquire(v, o, new);
2168 if (unlikely(r != o))
2169 *old = r;
2170 return likely(r == o);
2171 #endif
2172 }
2173
2174 /**
2175 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
2176 * @v: pointer to atomic_t
2177 * @old: pointer to int value to compare with
2178 * @new: int value to assign
2179 *
2180 * If (@v == @old), atomically updates @v to @new with release ordering.
2181 * Otherwise, updates @old to the current value of @v.
2182 *
2183 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
2184 *
2185 * Return: @true if the exchange occurred, @false otherwise.
2186 */
2187 static __always_inline bool
2188 raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2189 {
2190 #if defined(arch_atomic_try_cmpxchg_release)
2191 return arch_atomic_try_cmpxchg_release(v, old, new);
2192 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2193 __atomic_release_fence();
2194 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2195 #elif defined(arch_atomic_try_cmpxchg)
2196 return arch_atomic_try_cmpxchg(v, old, new);
2197 #else
2198 int r, o = *old;
2199 r = raw_atomic_cmpxchg_release(v, o, new);
2200 if (unlikely(r != o))
2201 *old = r;
2202 return likely(r == o);
2203 #endif
2204 }
2205
2206 /**
2207 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2208 * @v: pointer to atomic_t
2209 * @old: pointer to int value to compare with
2210 * @new: int value to assign
2211 *
2212 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2213 * Otherwise, updates @old to the current value of @v.
2214 *
2215 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
2216 *
2217 * Return: @true if the exchange occurred, @false otherwise.
2218 */
2219 static __always_inline bool
2220 raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
2221 {
2222 #if defined(arch_atomic_try_cmpxchg_relaxed)
2223 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2224 #elif defined(arch_atomic_try_cmpxchg)
2225 return arch_atomic_try_cmpxchg(v, old, new);
2226 #else
2227 int r, o = *old;
2228 r = raw_atomic_cmpxchg_relaxed(v, o, new);
2229 if (unlikely(r != o))
2230 *old = r;
2231 return likely(r == o);
2232 #endif
2233 }
2234
2235 /**
2236 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
2237 * @i: int value to subtract
2238 * @v: pointer to atomic_t
2239 *
2240 * Atomically updates @v to (@v - @i) with full ordering.
2241 *
2242 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
2243 *
2244 * Return: @true if the resulting value of @v is zero, @false otherwise.
2245 */
2246 static __always_inline bool
2247 raw_atomic_sub_and_test(int i, atomic_t *v)
2248 {
2249 #if defined(arch_atomic_sub_and_test)
2250 return arch_atomic_sub_and_test(i, v);
2251 #else
2252 return raw_atomic_sub_return(i, v) == 0;
2253 #endif
2254 }
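
/*
 * Illustrative sketch, not part of the generated API: the "and test" variants
 * suit reference counting, where only the final decrement may release the
 * object. The helper and callback are hypothetical.
 */
static __always_inline void example_put_many(atomic_t *refs, int n,
					     void (*free_obj)(void))
{
	if (raw_atomic_sub_and_test(n, refs))
		free_obj();
}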
2255
2256 /**
2257 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
2258 * @v: pointer to atomic_t
2259 *
2260 * Atomically updates @v to (@v - 1) with full ordering.
2261 *
2262 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
2263 *
2264 * Return: @true if the resulting value of @v is zero, @false otherwise.
2265 */
2266 static __always_inline bool
2267 raw_atomic_dec_and_test(atomic_t *v)
2268 {
2269 #if defined(arch_atomic_dec_and_test)
2270 return arch_atomic_dec_and_test(v);
2271 #else
2272 return raw_atomic_dec_return(v) == 0;
2273 #endif
2274 }
2275
2276 /**
2277 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
2278 * @v: pointer to atomic_t
2279 *
2280 * Atomically updates @v to (@v + 1) with full ordering.
2281 *
2282 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
2283 *
2284 * Return: @true if the resulting value of @v is zero, @false otherwise.
2285 */
2286 static __always_inline bool
2287 raw_atomic_inc_and_test(atomic_t *v)
2288 {
2289 #if defined(arch_atomic_inc_and_test)
2290 return arch_atomic_inc_and_test(v);
2291 #else
2292 return raw_atomic_inc_return(v) == 0;
2293 #endif
2294 }
2295
2296 /**
2297 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
2298 * @i: int value to add
2299 * @v: pointer to atomic_t
2300 *
2301 * Atomically updates @v to (@v + @i) with full ordering.
2302 *
2303 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
2304 *
2305 * Return: @true if the resulting value of @v is negative, @false otherwise.
2306 */
2307 static __always_inline bool
2308 raw_atomic_add_negative(int i, atomic_t *v)
2309 {
2310 #if defined(arch_atomic_add_negative)
2311 return arch_atomic_add_negative(i, v);
2312 #elif defined(arch_atomic_add_negative_relaxed)
2313 bool ret;
2314 __atomic_pre_full_fence();
2315 ret = arch_atomic_add_negative_relaxed(i, v);
2316 __atomic_post_full_fence();
2317 return ret;
2318 #else
2319 return raw_atomic_add_return(i, v) < 0;
2320 #endif
2321 }
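
/*
 * Illustrative sketch, not part of the generated API: add_negative() folds
 * the sign test of the result into the atomic update, for example to notice
 * that a hypothetical credit counter has gone into debt.
 */
static __always_inline bool example_charge(atomic_t *credit, int cost)
{
	/* True if the account is now below zero. */
	return raw_atomic_add_negative(-cost, credit);
}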
2322
2323 /**
2324 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
2325 * @i: int value to add
2326 * @v: pointer to atomic_t
2327 *
2328 * Atomically updates @v to (@v + @i) with acquire ordering.
2329 *
2330 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
2331 *
2332 * Return: @true if the resulting value of @v is negative, @false otherwise.
2333 */
2334 static __always_inline bool
2335 raw_atomic_add_negative_acquire(int i, atomic_t *v)
2336 {
2337 #if defined(arch_atomic_add_negative_acquire)
2338 return arch_atomic_add_negative_acquire(i, v);
2339 #elif defined(arch_atomic_add_negative_relaxed)
2340 bool ret = arch_atomic_add_negative_relaxed(i, v);
2341 __atomic_acquire_fence();
2342 return ret;
2343 #elif defined(arch_atomic_add_negative)
2344 return arch_atomic_add_negative(i, v);
2345 #else
2346 return raw_atomic_add_return_acquire(i, v) < 0;
2347 #endif
2348 }
2349
2350 /**
2351 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
2352 * @i: int value to add
2353 * @v: pointer to atomic_t
2354 *
2355 * Atomically updates @v to (@v + @i) with release ordering.
2356 *
2357 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
2358 *
2359 * Return: @true if the resulting value of @v is negative, @false otherwise.
2360 */
2361 static __always_inline bool
2362 raw_atomic_add_negative_release(int i, atomic_t *v)
2363 {
2364 #if defined(arch_atomic_add_negative_release)
2365 return arch_atomic_add_negative_release(i, v);
2366 #elif defined(arch_atomic_add_negative_relaxed)
2367 __atomic_release_fence();
2368 return arch_atomic_add_negative_relaxed(i, v);
2369 #elif defined(arch_atomic_add_negative)
2370 return arch_atomic_add_negative(i, v);
2371 #else
2372 return raw_atomic_add_return_release(i, v) < 0;
2373 #endif
2374 }
2375
2376 /**
2377 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
2378 * @i: int value to add
2379 * @v: pointer to atomic_t
2380 *
2381 * Atomically updates @v to (@v + @i) with relaxed ordering.
2382 *
2383 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
2384 *
2385 * Return: @true if the resulting value of @v is negative, @false otherwise.
2386 */
2387 static __always_inline bool
2388 raw_atomic_add_negative_relaxed(int i, atomic_t *v)
2389 {
2390 #if defined(arch_atomic_add_negative_relaxed)
2391 return arch_atomic_add_negative_relaxed(i, v);
2392 #elif defined(arch_atomic_add_negative)
2393 return arch_atomic_add_negative(i, v);
2394 #else
2395 return raw_atomic_add_return_relaxed(i, v) < 0;
2396 #endif
2397 }
2398
2399 /**
2400 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
2401 * @v: pointer to atomic_t
2402 * @a: int value to add
2403 * @u: int value to compare with
2404 *
2405 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2406 *
2407 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
2408 *
2409 * Return: The original value of @v.
2410 */
2411 static __always_inline int
2412 raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
2413 {
2414 #if defined(arch_atomic_fetch_add_unless)
2415 return arch_atomic_fetch_add_unless(v, a, u);
2416 #else
2417 int c = raw_atomic_read(v);
2418
2419 do {
2420 if (unlikely(c == u))
2421 break;
2422 } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
2423
2424 return c;
2425 #endif
2426 }
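
/*
 * Illustrative sketch, not part of the generated API: fetch_add_unless()
 * returns the pre-add value, letting a caller distinguish "already at @u"
 * from a successful add. The saturation helper is hypothetical.
 */
static __always_inline bool example_get_unless_saturated(atomic_t *refs, int max)
{
	return raw_atomic_fetch_add_unless(refs, 1, max) != max;
}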
2427
2428 /**
2429 * raw_atomic_add_unless() - atomic add unless value with full ordering
2430 * @v: pointer to atomic_t
2431 * @a: int value to add
2432 * @u: int value to compare with
2433 *
2434 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2435 *
2436 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
2437 *
2438 * Return: @true if @v was updated, @false otherwise.
2439 */
2440 static __always_inline bool
2441 raw_atomic_add_unless(atomic_t *v, int a, int u)
2442 {
2443 #if defined(arch_atomic_add_unless)
2444 return arch_atomic_add_unless(v, a, u);
2445 #else
2446 return raw_atomic_fetch_add_unless(v, a, u) != u;
2447 #endif
2448 }
2449
2450 /**
2451 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
2452 * @v: pointer to atomic_t
2453 *
2454 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
2455 *
2456 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
2457 *
2458 * Return: @true if @v was updated, @false otherwise.
2459 */
2460 static __always_inline bool
2461 raw_atomic_inc_not_zero(atomic_t *v)
2462 {
2463 #if defined(arch_atomic_inc_not_zero)
2464 return arch_atomic_inc_not_zero(v);
2465 #else
2466 return raw_atomic_add_unless(v, 1, 0);
2467 #endif
2468 }
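
/*
 * Illustrative sketch, not part of the generated API: inc_not_zero() is the
 * usual way to take a reference on an object whose count may already have
 * hit zero, e.g. during a lockless lookup. The structure is hypothetical.
 */
struct example_obj {
	atomic_t refs;
};

static __always_inline bool example_tryget(struct example_obj *obj)
{
	return raw_atomic_inc_not_zero(&obj->refs);
}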
2469
2470 /**
2471 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
2472 * @v: pointer to atomic_t
2473 *
2474 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
2475 *
2476 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
2477 *
2478 * Return: @true if @v was updated, @false otherwise.
2479 */
2480 static __always_inline bool
2481 raw_atomic_inc_unless_negative(atomic_t *v)
2482 {
2483 #if defined(arch_atomic_inc_unless_negative)
2484 return arch_atomic_inc_unless_negative(v);
2485 #else
2486 int c = raw_atomic_read(v);
2487
2488 do {
2489 if (unlikely(c < 0))
2490 return false;
2491 } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
2492
2493 return true;
2494 #endif
2495 }
2496
2497 /**
2498 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
2499 * @v: pointer to atomic_t
2500 *
2501 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
2502 *
2503 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
2504 *
2505 * Return: @true if @v was updated, @false otherwise.
2506 */
2507 static __always_inline bool
2508 raw_atomic_dec_unless_positive(atomic_t *v)
2509 {
2510 #if defined(arch_atomic_dec_unless_positive)
2511 return arch_atomic_dec_unless_positive(v);
2512 #else
2513 int c = raw_atomic_read(v);
2514
2515 do {
2516 if (unlikely(c > 0))
2517 return false;
2518 } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
2519
2520 return true;
2521 #endif
2522 }
2523
2524 /**
2525 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
2526 * @v: pointer to atomic_t
2527 *
2528 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
2529 *
2530 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
2531 *
2532 * Return: The old value of @v minus one, regardless of whether @v was updated.
2533 */
2534 static __always_inline int
2535 raw_atomic_dec_if_positive(atomic_t *v)
2536 {
2537 #if defined(arch_atomic_dec_if_positive)
2538 return arch_atomic_dec_if_positive(v);
2539 #else
2540 int dec, c = raw_atomic_read(v);
2541
2542 do {
2543 dec = c - 1;
2544 if (unlikely(dec < 0))
2545 break;
2546 } while (!raw_atomic_try_cmpxchg(v, &c, dec));
2547
2548 return dec;
2549 #endif
2550 }
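
/*
 * Illustrative sketch, not part of the generated API: a negative return from
 * dec_if_positive() means @v was not decremented, so it can hand out tokens
 * without ever going below zero. The token-pool helper is hypothetical.
 */
static __always_inline bool example_take_token(atomic_t *tokens)
{
	return raw_atomic_dec_if_positive(tokens) >= 0;
}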
2551
2552 #ifdef CONFIG_GENERIC_ATOMIC64
2553 #include <asm-generic/atomic64.h>
2554 #endif
2555
2556 /**
2557 * raw_atomic64_read() - atomic load with relaxed ordering
2558 * @v: pointer to atomic64_t
2559 *
2560 * Atomically loads the value of @v with relaxed ordering.
2561 *
2562 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
2563 *
2564 * Return: The value loaded from @v.
2565 */
2566 static __always_inline s64
2567 raw_atomic64_read(const atomic64_t *v)
2568 {
2569 return arch_atomic64_read(v);
2570 }
2571
2572 /**
2573 * raw_atomic64_read_acquire() - atomic load with acquire ordering
2574 * @v: pointer to atomic64_t
2575 *
2576 * Atomically loads the value of @v with acquire ordering.
2577 *
2578 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2579 *
2580 * Return: The value loaded from @v.
2581 */
2582 static __always_inline s64
2583 raw_atomic64_read_acquire(const atomic64_t *v)
2584 {
2585 #if defined(arch_atomic64_read_acquire)
2586 return arch_atomic64_read_acquire(v);
2587 #else
2588 s64 ret;
2589
2590 if (__native_word(atomic64_t)) {
2591 ret = smp_load_acquire(&(v)->counter);
2592 } else {
2593 ret = raw_atomic64_read(v);
2594 __atomic_acquire_fence();
2595 }
2596
2597 return ret;
2598 #endif
2599 }
2600
2601 /**
2602 * raw_atomic64_set() - atomic set with relaxed ordering
2603 * @v: pointer to atomic64_t
2604 * @i: s64 value to assign
2605 *
2606 * Atomically sets @v to @i with relaxed ordering.
2607 *
2608 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
2609 *
2610 * Return: Nothing.
2611 */
2612 static __always_inline void
2613 raw_atomic64_set(atomic64_t *v, s64 i)
2614 {
2615 arch_atomic64_set(v, i);
2616 }
2617
2618 /**
2619 * raw_atomic64_set_release() - atomic set with release ordering
2620 * @v: pointer to atomic64_t
2621 * @i: s64 value to assign
2622 *
2623 * Atomically sets @v to @i with release ordering.
2624 *
2625 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
2626 *
2627 * Return: Nothing.
2628 */
2629 static __always_inline void
2630 raw_atomic64_set_release(atomic64_t *v, s64 i)
2631 {
2632 #if defined(arch_atomic64_set_release)
2633 arch_atomic64_set_release(v, i);
2634 #else
2635 if (__native_word(atomic64_t)) {
2636 smp_store_release(&(v)->counter, i);
2637 } else {
2638 __atomic_release_fence();
2639 raw_atomic64_set(v, i);
2640 }
2641 #endif
2642 }
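
/*
 * Illustrative sketch, not part of the generated API: a release store pairs
 * with an acquire load so that data written before the store is visible to
 * a reader that observes the new value. The sequence-flag helpers below are
 * hypothetical.
 */
static __always_inline void example_publish(atomic64_t *seq, s64 next)
{
	/* Orders all prior stores before the update of @seq. */
	raw_atomic64_set_release(seq, next);
}

static __always_inline s64 example_consume(const atomic64_t *seq)
{
	/* Orders later loads after the read of @seq. */
	return raw_atomic64_read_acquire(seq);
}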
2643
2644 /**
2645 * raw_atomic64_add() - atomic add with relaxed ordering
2646 * @i: s64 value to add
2647 * @v: pointer to atomic64_t
2648 *
2649 * Atomically updates @v to (@v + @i) with relaxed ordering.
2650 *
2651 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
2652 *
2653 * Return: Nothing.
2654 */
2655 static __always_inline void
2656 raw_atomic64_add(s64 i, atomic64_t *v)
2657 {
2658 arch_atomic64_add(i, v);
2659 }
2660
2661 /**
2662 * raw_atomic64_add_return() - atomic add with full ordering
2663 * @i: s64 value to add
2664 * @v: pointer to atomic64_t
2665 *
2666 * Atomically updates @v to (@v + @i) with full ordering.
2667 *
2668 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
2669 *
2670 * Return: The updated value of @v.
2671 */
2672 static __always_inline s64
2673 raw_atomic64_add_return(s64 i, atomic64_t *v)
2674 {
2675 #if defined(arch_atomic64_add_return)
2676 return arch_atomic64_add_return(i, v);
2677 #elif defined(arch_atomic64_add_return_relaxed)
2678 s64 ret;
2679 __atomic_pre_full_fence();
2680 ret = arch_atomic64_add_return_relaxed(i, v);
2681 __atomic_post_full_fence();
2682 return ret;
2683 #else
2684 #error "Unable to define raw_atomic64_add_return"
2685 #endif
2686 }
2687
2688 /**
2689 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
2690 * @i: s64 value to add
2691 * @v: pointer to atomic64_t
2692 *
2693 * Atomically updates @v to (@v + @i) with acquire ordering.
2694 *
2695 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
2696 *
2697 * Return: The updated value of @v.
2698 */
2699 static __always_inline s64
2700 raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
2701 {
2702 #if defined(arch_atomic64_add_return_acquire)
2703 return arch_atomic64_add_return_acquire(i, v);
2704 #elif defined(arch_atomic64_add_return_relaxed)
2705 s64 ret = arch_atomic64_add_return_relaxed(i, v);
2706 __atomic_acquire_fence();
2707 return ret;
2708 #elif defined(arch_atomic64_add_return)
2709 return arch_atomic64_add_return(i, v);
2710 #else
2711 #error "Unable to define raw_atomic64_add_return_acquire"
2712 #endif
2713 }
2714
2715 /**
2716 * raw_atomic64_add_return_release() - atomic add with release ordering
2717 * @i: s64 value to add
2718 * @v: pointer to atomic64_t
2719 *
2720 * Atomically updates @v to (@v + @i) with release ordering.
2721 *
2722 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
2723 *
2724 * Return: The updated value of @v.
2725 */
2726 static __always_inline s64
2727 raw_atomic64_add_return_release(s64 i, atomic64_t *v)
2728 {
2729 #if defined(arch_atomic64_add_return_release)
2730 return arch_atomic64_add_return_release(i, v);
2731 #elif defined(arch_atomic64_add_return_relaxed)
2732 __atomic_release_fence();
2733 return arch_atomic64_add_return_relaxed(i, v);
2734 #elif defined(arch_atomic64_add_return)
2735 return arch_atomic64_add_return(i, v);
2736 #else
2737 #error "Unable to define raw_atomic64_add_return_release"
2738 #endif
2739 }
2740
2741 /**
2742 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
2743 * @i: s64 value to add
2744 * @v: pointer to atomic64_t
2745 *
2746 * Atomically updates @v to (@v + @i) with relaxed ordering.
2747 *
2748 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
2749 *
2750 * Return: The updated value of @v.
2751 */
2752 static __always_inline s64
2753 raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
2754 {
2755 #if defined(arch_atomic64_add_return_relaxed)
2756 return arch_atomic64_add_return_relaxed(i, v);
2757 #elif defined(arch_atomic64_add_return)
2758 return arch_atomic64_add_return(i, v);
2759 #else
2760 #error "Unable to define raw_atomic64_add_return_relaxed"
2761 #endif
2762 }
2763
2764 /**
2765 * raw_atomic64_fetch_add() - atomic add with full ordering
2766 * @i: s64 value to add
2767 * @v: pointer to atomic64_t
2768 *
2769 * Atomically updates @v to (@v + @i) with full ordering.
2770 *
2771 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
2772 *
2773 * Return: The original value of @v.
2774 */
2775 static __always_inline s64
2776 raw_atomic64_fetch_add(s64 i, atomic64_t *v)
2777 {
2778 #if defined(arch_atomic64_fetch_add)
2779 return arch_atomic64_fetch_add(i, v);
2780 #elif defined(arch_atomic64_fetch_add_relaxed)
2781 s64 ret;
2782 __atomic_pre_full_fence();
2783 ret = arch_atomic64_fetch_add_relaxed(i, v);
2784 __atomic_post_full_fence();
2785 return ret;
2786 #else
2787 #error "Unable to define raw_atomic64_fetch_add"
2788 #endif
2789 }
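
/*
 * Illustrative sketch, not part of the generated API: add_return() yields
 * the updated value while fetch_add() yields the prior one; a 64-bit ticket
 * dispenser only needs the prior value. The helper is hypothetical.
 */
static __always_inline s64 example_take_ticket(atomic64_t *next_ticket)
{
	return raw_atomic64_fetch_add(1, next_ticket);
}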
2790
2791 /**
2792 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
2793 * @i: s64 value to add
2794 * @v: pointer to atomic64_t
2795 *
2796 * Atomically updates @v to (@v + @i) with acquire ordering.
2797 *
2798 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
2799 *
2800 * Return: The original value of @v.
2801 */
2802 static __always_inline s64
2803 raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2804 {
2805 #if defined(arch_atomic64_fetch_add_acquire)
2806 return arch_atomic64_fetch_add_acquire(i, v);
2807 #elif defined(arch_atomic64_fetch_add_relaxed)
2808 s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2809 __atomic_acquire_fence();
2810 return ret;
2811 #elif defined(arch_atomic64_fetch_add)
2812 return arch_atomic64_fetch_add(i, v);
2813 #else
2814 #error "Unable to define raw_atomic64_fetch_add_acquire"
2815 #endif
2816 }
2817
2818 /**
2819 * raw_atomic64_fetch_add_release() - atomic add with release ordering
2820 * @i: s64 value to add
2821 * @v: pointer to atomic64_t
2822 *
2823 * Atomically updates @v to (@v + @i) with release ordering.
2824 *
2825 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
2826 *
2827 * Return: The original value of @v.
2828 */
2829 static __always_inline s64
2830 raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2831 {
2832 #if defined(arch_atomic64_fetch_add_release)
2833 return arch_atomic64_fetch_add_release(i, v);
2834 #elif defined(arch_atomic64_fetch_add_relaxed)
2835 __atomic_release_fence();
2836 return arch_atomic64_fetch_add_relaxed(i, v);
2837 #elif defined(arch_atomic64_fetch_add)
2838 return arch_atomic64_fetch_add(i, v);
2839 #else
2840 #error "Unable to define raw_atomic64_fetch_add_release"
2841 #endif
2842 }
2843
2844 /**
2845 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
2846 * @i: s64 value to add
2847 * @v: pointer to atomic64_t
2848 *
2849 * Atomically updates @v to (@v + @i) with relaxed ordering.
2850 *
2851 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
2852 *
2853 * Return: The original value of @v.
2854 */
2855 static __always_inline s64
2856 raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
2857 {
2858 #if defined(arch_atomic64_fetch_add_relaxed)
2859 return arch_atomic64_fetch_add_relaxed(i, v);
2860 #elif defined(arch_atomic64_fetch_add)
2861 return arch_atomic64_fetch_add(i, v);
2862 #else
2863 #error "Unable to define raw_atomic64_fetch_add_relaxed"
2864 #endif
2865 }
2866
2867 /**
2868 * raw_atomic64_sub() - atomic subtract with relaxed ordering
2869 * @i: s64 value to subtract
2870 * @v: pointer to atomic64_t
2871 *
2872 * Atomically updates @v to (@v - @i) with relaxed ordering.
2873 *
2874 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
2875 *
2876 * Return: Nothing.
2877 */
2878 static __always_inline void
2879 raw_atomic64_sub(s64 i, atomic64_t *v)
2880 {
2881 arch_atomic64_sub(i, v);
2882 }
2883
2884 /**
2885 * raw_atomic64_sub_return() - atomic subtract with full ordering
2886 * @i: s64 value to subtract
2887 * @v: pointer to atomic64_t
2888 *
2889 * Atomically updates @v to (@v - @i) with full ordering.
2890 *
2891 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
2892 *
2893 * Return: The updated value of @v.
2894 */
2895 static __always_inline s64
2896 raw_atomic64_sub_return(s64 i, atomic64_t *v)
2897 {
2898 #if defined(arch_atomic64_sub_return)
2899 return arch_atomic64_sub_return(i, v);
2900 #elif defined(arch_atomic64_sub_return_relaxed)
2901 s64 ret;
2902 __atomic_pre_full_fence();
2903 ret = arch_atomic64_sub_return_relaxed(i, v);
2904 __atomic_post_full_fence();
2905 return ret;
2906 #else
2907 #error "Unable to define raw_atomic64_sub_return"
2908 #endif
2909 }
2910
2911 /**
2912 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
2913 * @i: s64 value to subtract
2914 * @v: pointer to atomic64_t
2915 *
2916 * Atomically updates @v to (@v - @i) with acquire ordering.
2917 *
2918 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
2919 *
2920 * Return: The updated value of @v.
2921 */
2922 static __always_inline s64
2923 raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2924 {
2925 #if defined(arch_atomic64_sub_return_acquire)
2926 return arch_atomic64_sub_return_acquire(i, v);
2927 #elif defined(arch_atomic64_sub_return_relaxed)
2928 s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2929 __atomic_acquire_fence();
2930 return ret;
2931 #elif defined(arch_atomic64_sub_return)
2932 return arch_atomic64_sub_return(i, v);
2933 #else
2934 #error "Unable to define raw_atomic64_sub_return_acquire"
2935 #endif
2936 }
2937
2938 /**
2939 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
2940 * @i: s64 value to subtract
2941 * @v: pointer to atomic64_t
2942 *
2943 * Atomically updates @v to (@v - @i) with release ordering.
2944 *
2945 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
2946 *
2947 * Return: The updated value of @v.
2948 */
2949 static __always_inline s64
2950 raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
2951 {
2952 #if defined(arch_atomic64_sub_return_release)
2953 return arch_atomic64_sub_return_release(i, v);
2954 #elif defined(arch_atomic64_sub_return_relaxed)
2955 __atomic_release_fence();
2956 return arch_atomic64_sub_return_relaxed(i, v);
2957 #elif defined(arch_atomic64_sub_return)
2958 return arch_atomic64_sub_return(i, v);
2959 #else
2960 #error "Unable to define raw_atomic64_sub_return_release"
2961 #endif
2962 }
2963
2964 /**
2965 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
2966 * @i: s64 value to subtract
2967 * @v: pointer to atomic64_t
2968 *
2969 * Atomically updates @v to (@v - @i) with relaxed ordering.
2970 *
2971 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
2972 *
2973 * Return: The updated value of @v.
2974 */
2975 static __always_inline s64
2976 raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
2977 {
2978 #if defined(arch_atomic64_sub_return_relaxed)
2979 return arch_atomic64_sub_return_relaxed(i, v);
2980 #elif defined(arch_atomic64_sub_return)
2981 return arch_atomic64_sub_return(i, v);
2982 #else
2983 #error "Unable to define raw_atomic64_sub_return_relaxed"
2984 #endif
2985 }
2986
2987 /**
2988 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
2989 * @i: s64 value to subtract
2990 * @v: pointer to atomic64_t
2991 *
2992 * Atomically updates @v to (@v - @i) with full ordering.
2993 *
2994 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
2995 *
2996 * Return: The original value of @v.
2997 */
2998 static __always_inline s64
2999 raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
3000 {
3001 #if defined(arch_atomic64_fetch_sub)
3002 return arch_atomic64_fetch_sub(i, v);
3003 #elif defined(arch_atomic64_fetch_sub_relaxed)
3004 s64 ret;
3005 __atomic_pre_full_fence();
3006 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3007 __atomic_post_full_fence();
3008 return ret;
3009 #else
3010 #error "Unable to define raw_atomic64_fetch_sub"
3011 #endif
3012 }
3013
3014 /**
3015 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
3016 * @i: s64 value to subtract
3017 * @v: pointer to atomic64_t
3018 *
3019 * Atomically updates @v to (@v - @i) with acquire ordering.
3020 *
3021 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
3022 *
3023 * Return: The original value of @v.
3024 */
3025 static __always_inline s64
3026 raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
3027 {
3028 #if defined(arch_atomic64_fetch_sub_acquire)
3029 return arch_atomic64_fetch_sub_acquire(i, v);
3030 #elif defined(arch_atomic64_fetch_sub_relaxed)
3031 s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3032 __atomic_acquire_fence();
3033 return ret;
3034 #elif defined(arch_atomic64_fetch_sub)
3035 return arch_atomic64_fetch_sub(i, v);
3036 #else
3037 #error "Unable to define raw_atomic64_fetch_sub_acquire"
3038 #endif
3039 }
3040
3041 /**
3042 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
3043 * @i: s64 value to subtract
3044 * @v: pointer to atomic64_t
3045 *
3046 * Atomically updates @v to (@v - @i) with release ordering.
3047 *
3048 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
3049 *
3050 * Return: The original value of @v.
3051 */
3052 static __always_inline s64
3053 raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
3054 {
3055 #if defined(arch_atomic64_fetch_sub_release)
3056 return arch_atomic64_fetch_sub_release(i, v);
3057 #elif defined(arch_atomic64_fetch_sub_relaxed)
3058 __atomic_release_fence();
3059 return arch_atomic64_fetch_sub_relaxed(i, v);
3060 #elif defined(arch_atomic64_fetch_sub)
3061 return arch_atomic64_fetch_sub(i, v);
3062 #else
3063 #error "Unable to define raw_atomic64_fetch_sub_release"
3064 #endif
3065 }
3066
3067 /**
3068 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
3069 * @i: s64 value to subtract
3070 * @v: pointer to atomic64_t
3071 *
3072 * Atomically updates @v to (@v - @i) with relaxed ordering.
3073 *
3074 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
3075 *
3076 * Return: The original value of @v.
3077 */
3078 static __always_inline s64
3079 raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
3080 {
3081 #if defined(arch_atomic64_fetch_sub_relaxed)
3082 return arch_atomic64_fetch_sub_relaxed(i, v);
3083 #elif defined(arch_atomic64_fetch_sub)
3084 return arch_atomic64_fetch_sub(i, v);
3085 #else
3086 #error "Unable to define raw_atomic64_fetch_sub_relaxed"
3087 #endif
3088 }
3089
3090 /**
3091 * raw_atomic64_inc() - atomic increment with relaxed ordering
3092 * @v: pointer to atomic64_t
3093 *
3094 * Atomically updates @v to (@v + 1) with relaxed ordering.
3095 *
3096 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
3097 *
3098 * Return: Nothing.
3099 */
3100 static __always_inline void
3101 raw_atomic64_inc(atomic64_t *v)
3102 {
3103 #if defined(arch_atomic64_inc)
3104 arch_atomic64_inc(v);
3105 #else
3106 raw_atomic64_add(1, v);
3107 #endif
3108 }
3109
3110 /**
3111 * raw_atomic64_inc_return() - atomic increment with full ordering
3112 * @v: pointer to atomic64_t
3113 *
3114 * Atomically updates @v to (@v + 1) with full ordering.
3115 *
3116 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
3117 *
3118 * Return: The updated value of @v.
3119 */
3120 static __always_inline s64
3121 raw_atomic64_inc_return(atomic64_t *v)
3122 {
3123 #if defined(arch_atomic64_inc_return)
3124 return arch_atomic64_inc_return(v);
3125 #elif defined(arch_atomic64_inc_return_relaxed)
3126 s64 ret;
3127 __atomic_pre_full_fence();
3128 ret = arch_atomic64_inc_return_relaxed(v);
3129 __atomic_post_full_fence();
3130 return ret;
3131 #else
3132 return raw_atomic64_add_return(1, v);
3133 #endif
3134 }
3135
3136 /**
3137 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
3138 * @v: pointer to atomic64_t
3139 *
3140 * Atomically updates @v to (@v + 1) with acquire ordering.
3141 *
3142 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
3143 *
3144 * Return: The updated value of @v.
3145 */
3146 static __always_inline s64
3147 raw_atomic64_inc_return_acquire(atomic64_t *v)
3148 {
3149 #if defined(arch_atomic64_inc_return_acquire)
3150 return arch_atomic64_inc_return_acquire(v);
3151 #elif defined(arch_atomic64_inc_return_relaxed)
3152 s64 ret = arch_atomic64_inc_return_relaxed(v);
3153 __atomic_acquire_fence();
3154 return ret;
3155 #elif defined(arch_atomic64_inc_return)
3156 return arch_atomic64_inc_return(v);
3157 #else
3158 return raw_atomic64_add_return_acquire(1, v);
3159 #endif
3160 }
3161
3162 /**
3163 * raw_atomic64_inc_return_release() - atomic increment with release ordering
3164 * @v: pointer to atomic64_t
3165 *
3166 * Atomically updates @v to (@v + 1) with release ordering.
3167 *
3168 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
3169 *
3170 * Return: The updated value of @v.
3171 */
3172 static __always_inline s64
3173 raw_atomic64_inc_return_release(atomic64_t *v)
3174 {
3175 #if defined(arch_atomic64_inc_return_release)
3176 return arch_atomic64_inc_return_release(v);
3177 #elif defined(arch_atomic64_inc_return_relaxed)
3178 __atomic_release_fence();
3179 return arch_atomic64_inc_return_relaxed(v);
3180 #elif defined(arch_atomic64_inc_return)
3181 return arch_atomic64_inc_return(v);
3182 #else
3183 return raw_atomic64_add_return_release(1, v);
3184 #endif
3185 }
3186
3187 /**
3188 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
3189 * @v: pointer to atomic64_t
3190 *
3191 * Atomically updates @v to (@v + 1) with relaxed ordering.
3192 *
3193 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
3194 *
3195 * Return: The updated value of @v.
3196 */
3197 static __always_inline s64
3198 raw_atomic64_inc_return_relaxed(atomic64_t *v)
3199 {
3200 #if defined(arch_atomic64_inc_return_relaxed)
3201 return arch_atomic64_inc_return_relaxed(v);
3202 #elif defined(arch_atomic64_inc_return)
3203 return arch_atomic64_inc_return(v);
3204 #else
3205 return raw_atomic64_add_return_relaxed(1, v);
3206 #endif
3207 }
3208
3209 /**
3210 * raw_atomic64_fetch_inc() - atomic increment with full ordering
3211 * @v: pointer to atomic64_t
3212 *
3213 * Atomically updates @v to (@v + 1) with full ordering.
3214 *
3215 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
3216 *
3217 * Return: The original value of @v.
3218 */
3219 static __always_inline s64
3220 raw_atomic64_fetch_inc(atomic64_t *v)
3221 {
3222 #if defined(arch_atomic64_fetch_inc)
3223 return arch_atomic64_fetch_inc(v);
3224 #elif defined(arch_atomic64_fetch_inc_relaxed)
3225 s64 ret;
3226 __atomic_pre_full_fence();
3227 ret = arch_atomic64_fetch_inc_relaxed(v);
3228 __atomic_post_full_fence();
3229 return ret;
3230 #else
3231 return raw_atomic64_fetch_add(1, v);
3232 #endif
3233 }
3234
3235 /**
3236 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
3237 * @v: pointer to atomic64_t
3238 *
3239 * Atomically updates @v to (@v + 1) with acquire ordering.
3240 *
3241 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
3242 *
3243 * Return: The original value of @v.
3244 */
3245 static __always_inline s64
3246 raw_atomic64_fetch_inc_acquire(atomic64_t *v)
3247 {
3248 #if defined(arch_atomic64_fetch_inc_acquire)
3249 return arch_atomic64_fetch_inc_acquire(v);
3250 #elif defined(arch_atomic64_fetch_inc_relaxed)
3251 s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3252 __atomic_acquire_fence();
3253 return ret;
3254 #elif defined(arch_atomic64_fetch_inc)
3255 return arch_atomic64_fetch_inc(v);
3256 #else
3257 return raw_atomic64_fetch_add_acquire(1, v);
3258 #endif
3259 }
3260
3261 /**
3262 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
3263 * @v: pointer to atomic64_t
3264 *
3265 * Atomically updates @v to (@v + 1) with release ordering.
3266 *
3267 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
3268 *
3269 * Return: The original value of @v.
3270 */
3271 static __always_inline s64
3272 raw_atomic64_fetch_inc_release(atomic64_t *v)
3273 {
3274 #if defined(arch_atomic64_fetch_inc_release)
3275 return arch_atomic64_fetch_inc_release(v);
3276 #elif defined(arch_atomic64_fetch_inc_relaxed)
3277 __atomic_release_fence();
3278 return arch_atomic64_fetch_inc_relaxed(v);
3279 #elif defined(arch_atomic64_fetch_inc)
3280 return arch_atomic64_fetch_inc(v);
3281 #else
3282 return raw_atomic64_fetch_add_release(1, v);
3283 #endif
3284 }
3285
3286 /**
3287 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
3288 * @v: pointer to atomic64_t
3289 *
3290 * Atomically updates @v to (@v + 1) with relaxed ordering.
3291 *
3292 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
3293 *
3294 * Return: The original value of @v.
3295 */
3296 static __always_inline s64
3297 raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
3298 {
3299 #if defined(arch_atomic64_fetch_inc_relaxed)
3300 return arch_atomic64_fetch_inc_relaxed(v);
3301 #elif defined(arch_atomic64_fetch_inc)
3302 return arch_atomic64_fetch_inc(v);
3303 #else
3304 return raw_atomic64_fetch_add_relaxed(1, v);
3305 #endif
3306 }
3307
3308 /**
3309 * raw_atomic64_dec() - atomic decrement with relaxed ordering
3310 * @v: pointer to atomic64_t
3311 *
3312 * Atomically updates @v to (@v - 1) with relaxed ordering.
3313 *
3314 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
3315 *
3316 * Return: Nothing.
3317 */
3318 static __always_inline void
3319 raw_atomic64_dec(atomic64_t *v)
3320 {
3321 #if defined(arch_atomic64_dec)
3322 arch_atomic64_dec(v);
3323 #else
3324 raw_atomic64_sub(1, v);
3325 #endif
3326 }
3327
3328 /**
3329 * raw_atomic64_dec_return() - atomic decrement with full ordering
3330 * @v: pointer to atomic64_t
3331 *
3332 * Atomically updates @v to (@v - 1) with full ordering.
3333 *
3334 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
3335 *
3336 * Return: The updated value of @v.
3337 */
3338 static __always_inline s64
3339 raw_atomic64_dec_return(atomic64_t *v)
3340 {
3341 #if defined(arch_atomic64_dec_return)
3342 return arch_atomic64_dec_return(v);
3343 #elif defined(arch_atomic64_dec_return_relaxed)
3344 s64 ret;
3345 __atomic_pre_full_fence();
3346 ret = arch_atomic64_dec_return_relaxed(v);
3347 __atomic_post_full_fence();
3348 return ret;
3349 #else
3350 return raw_atomic64_sub_return(1, v);
3351 #endif
3352 }
3353
3354 /**
3355 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
3356 * @v: pointer to atomic64_t
3357 *
3358 * Atomically updates @v to (@v - 1) with acquire ordering.
3359 *
3360 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
3361 *
3362 * Return: The updated value of @v.
3363 */
3364 static __always_inline s64
3365 raw_atomic64_dec_return_acquire(atomic64_t *v)
3366 {
3367 #if defined(arch_atomic64_dec_return_acquire)
3368 return arch_atomic64_dec_return_acquire(v);
3369 #elif defined(arch_atomic64_dec_return_relaxed)
3370 s64 ret = arch_atomic64_dec_return_relaxed(v);
3371 __atomic_acquire_fence();
3372 return ret;
3373 #elif defined(arch_atomic64_dec_return)
3374 return arch_atomic64_dec_return(v);
3375 #else
3376 return raw_atomic64_sub_return_acquire(1, v);
3377 #endif
3378 }
3379
3380 /**
3381 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
3382 * @v: pointer to atomic64_t
3383 *
3384 * Atomically updates @v to (@v - 1) with release ordering.
3385 *
3386 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
3387 *
3388 * Return: The updated value of @v.
3389 */
3390 static __always_inline s64
3391 raw_atomic64_dec_return_release(atomic64_t *v)
3392 {
3393 #if defined(arch_atomic64_dec_return_release)
3394 return arch_atomic64_dec_return_release(v);
3395 #elif defined(arch_atomic64_dec_return_relaxed)
3396 __atomic_release_fence();
3397 return arch_atomic64_dec_return_relaxed(v);
3398 #elif defined(arch_atomic64_dec_return)
3399 return arch_atomic64_dec_return(v);
3400 #else
3401 return raw_atomic64_sub_return_release(1, v);
3402 #endif
3403 }
3404
3405 /**
3406 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
3407 * @v: pointer to atomic64_t
3408 *
3409 * Atomically updates @v to (@v - 1) with relaxed ordering.
3410 *
3411 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
3412 *
3413 * Return: The updated value of @v.
3414 */
3415 static __always_inline s64
3416 raw_atomic64_dec_return_relaxed(atomic64_t *v)
3417 {
3418 #if defined(arch_atomic64_dec_return_relaxed)
3419 return arch_atomic64_dec_return_relaxed(v);
3420 #elif defined(arch_atomic64_dec_return)
3421 return arch_atomic64_dec_return(v);
3422 #else
3423 return raw_atomic64_sub_return_relaxed(1, v);
3424 #endif
3425 }
3426
3427 /**
3428 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
3429 * @v: pointer to atomic64_t
3430 *
3431 * Atomically updates @v to (@v - 1) with full ordering.
3432 *
3433 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
3434 *
3435 * Return: The original value of @v.
3436 */
3437 static __always_inline s64
3438 raw_atomic64_fetch_dec(atomic64_t *v)
3439 {
3440 #if defined(arch_atomic64_fetch_dec)
3441 return arch_atomic64_fetch_dec(v);
3442 #elif defined(arch_atomic64_fetch_dec_relaxed)
3443 s64 ret;
3444 __atomic_pre_full_fence();
3445 ret = arch_atomic64_fetch_dec_relaxed(v);
3446 __atomic_post_full_fence();
3447 return ret;
3448 #else
3449 return raw_atomic64_fetch_sub(1, v);
3450 #endif
3451 }
3452
3453 /**
3454 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
3455 * @v: pointer to atomic64_t
3456 *
3457 * Atomically updates @v to (@v - 1) with acquire ordering.
3458 *
3459 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
3460 *
3461 * Return: The original value of @v.
3462 */
3463 static __always_inline s64
3464 raw_atomic64_fetch_dec_acquire(atomic64_t *v)
3465 {
3466 #if defined(arch_atomic64_fetch_dec_acquire)
3467 return arch_atomic64_fetch_dec_acquire(v);
3468 #elif defined(arch_atomic64_fetch_dec_relaxed)
3469 s64 ret = arch_atomic64_fetch_dec_relaxed(v);
3470 __atomic_acquire_fence();
3471 return ret;
3472 #elif defined(arch_atomic64_fetch_dec)
3473 return arch_atomic64_fetch_dec(v);
3474 #else
3475 return raw_atomic64_fetch_sub_acquire(1, v);
3476 #endif
3477 }
3478
3479 /**
3480 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
3481 * @v: pointer to atomic64_t
3482 *
3483 * Atomically updates @v to (@v - 1) with release ordering.
3484 *
3485 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
3486 *
3487 * Return: The original value of @v.
3488 */
3489 static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t * v)3490 raw_atomic64_fetch_dec_release(atomic64_t *v)
3491 {
3492 #if defined(arch_atomic64_fetch_dec_release)
3493 return arch_atomic64_fetch_dec_release(v);
3494 #elif defined(arch_atomic64_fetch_dec_relaxed)
3495 __atomic_release_fence();
3496 return arch_atomic64_fetch_dec_relaxed(v);
3497 #elif defined(arch_atomic64_fetch_dec)
3498 return arch_atomic64_fetch_dec(v);
3499 #else
3500 return raw_atomic64_fetch_sub_release(1, v);
3501 #endif
3502 }
3503
3504 /**
3505 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
3506 * @v: pointer to atomic64_t
3507 *
3508 * Atomically updates @v to (@v - 1) with relaxed ordering.
3509 *
3510 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
3511 *
3512 * Return: The original value of @v.
3513 */
3514 static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t * v)3515 raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
3516 {
3517 #if defined(arch_atomic64_fetch_dec_relaxed)
3518 return arch_atomic64_fetch_dec_relaxed(v);
3519 #elif defined(arch_atomic64_fetch_dec)
3520 return arch_atomic64_fetch_dec(v);
3521 #else
3522 return raw_atomic64_fetch_sub_relaxed(1, v);
3523 #endif
3524 }
3525
3526 /**
3527 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
3528 * @i: s64 value
3529 * @v: pointer to atomic64_t
3530 *
3531 * Atomically updates @v to (@v & @i) with relaxed ordering.
3532 *
3533 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
3534 *
3535 * Return: Nothing.
3536 */
3537 static __always_inline void
raw_atomic64_and(s64 i,atomic64_t * v)3538 raw_atomic64_and(s64 i, atomic64_t *v)
3539 {
3540 arch_atomic64_and(i, v);
3541 }
3542
3543 /**
3544 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
3545 * @i: s64 value
3546 * @v: pointer to atomic64_t
3547 *
3548 * Atomically updates @v to (@v & @i) with full ordering.
3549 *
3550 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
3551 *
3552 * Return: The original value of @v.
3553 */
3554 static __always_inline s64
raw_atomic64_fetch_and(s64 i,atomic64_t * v)3555 raw_atomic64_fetch_and(s64 i, atomic64_t *v)
3556 {
3557 #if defined(arch_atomic64_fetch_and)
3558 return arch_atomic64_fetch_and(i, v);
3559 #elif defined(arch_atomic64_fetch_and_relaxed)
3560 s64 ret;
3561 __atomic_pre_full_fence();
3562 ret = arch_atomic64_fetch_and_relaxed(i, v);
3563 __atomic_post_full_fence();
3564 return ret;
3565 #else
3566 #error "Unable to define raw_atomic64_fetch_and"
3567 #endif
3568 }
3569
3570 /**
3571 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
3572 * @i: s64 value
3573 * @v: pointer to atomic64_t
3574 *
3575 * Atomically updates @v to (@v & @i) with acquire ordering.
3576 *
3577 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
3578 *
3579 * Return: The original value of @v.
3580 */
3581 static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i,atomic64_t * v)3582 raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3583 {
3584 #if defined(arch_atomic64_fetch_and_acquire)
3585 return arch_atomic64_fetch_and_acquire(i, v);
3586 #elif defined(arch_atomic64_fetch_and_relaxed)
3587 s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3588 __atomic_acquire_fence();
3589 return ret;
3590 #elif defined(arch_atomic64_fetch_and)
3591 return arch_atomic64_fetch_and(i, v);
3592 #else
3593 #error "Unable to define raw_atomic64_fetch_and_acquire"
3594 #endif
3595 }
3596
3597 /**
3598 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
3599 * @i: s64 value
3600 * @v: pointer to atomic64_t
3601 *
3602 * Atomically updates @v to (@v & @i) with release ordering.
3603 *
3604 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
3605 *
3606 * Return: The original value of @v.
3607 */
3608 static __always_inline s64
raw_atomic64_fetch_and_release(s64 i,atomic64_t * v)3609 raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3610 {
3611 #if defined(arch_atomic64_fetch_and_release)
3612 return arch_atomic64_fetch_and_release(i, v);
3613 #elif defined(arch_atomic64_fetch_and_relaxed)
3614 __atomic_release_fence();
3615 return arch_atomic64_fetch_and_relaxed(i, v);
3616 #elif defined(arch_atomic64_fetch_and)
3617 return arch_atomic64_fetch_and(i, v);
3618 #else
3619 #error "Unable to define raw_atomic64_fetch_and_release"
3620 #endif
3621 }
3622
3623 /**
3624 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
3625 * @i: s64 value
3626 * @v: pointer to atomic64_t
3627 *
3628 * Atomically updates @v to (@v & @i) with relaxed ordering.
3629 *
3630 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
3631 *
3632 * Return: The original value of @v.
3633 */
3634 static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i,atomic64_t * v)3635 raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
3636 {
3637 #if defined(arch_atomic64_fetch_and_relaxed)
3638 return arch_atomic64_fetch_and_relaxed(i, v);
3639 #elif defined(arch_atomic64_fetch_and)
3640 return arch_atomic64_fetch_and(i, v);
3641 #else
3642 #error "Unable to define raw_atomic64_fetch_and_relaxed"
3643 #endif
3644 }
3645
3646 /**
3647 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
3648 * @i: s64 value
3649 * @v: pointer to atomic64_t
3650 *
3651 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3652 *
3653 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
3654 *
3655 * Return: Nothing.
3656 */
3657 static __always_inline void
raw_atomic64_andnot(s64 i,atomic64_t * v)3658 raw_atomic64_andnot(s64 i, atomic64_t *v)
3659 {
3660 #if defined(arch_atomic64_andnot)
3661 arch_atomic64_andnot(i, v);
3662 #else
3663 raw_atomic64_and(~i, v);
3664 #endif
3665 }
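
/*
 * Illustrative usage sketch (not part of the generated API): andnot() clears
 * the bits set in @i, i.e. it behaves like raw_atomic64_and(~i, v), which is
 * exactly the fallback above. A hypothetical flag-clearing helper, assuming a
 * made-up EXAMPLE_FLAG_BUSY bit:
 *
 *	#define EXAMPLE_FLAG_BUSY	BIT_ULL(0)
 *
 *	static inline void example_clear_busy(atomic64_t *flags)
 *	{
 *		raw_atomic64_andnot(EXAMPLE_FLAG_BUSY, flags);
 *	}
 */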

/**
 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
	return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
	return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}

/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

/**
 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

/**
 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}

/**
 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic64_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}
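
/*
 * Illustrative usage sketch (not part of the generated API): xchg() installs
 * @new unconditionally and hands back whatever was stored before, which makes
 * it a natural fit for atomically claiming or draining a value. Hypothetical
 * helper that resets a statistics counter and returns the drained total:
 *
 *	static inline s64 example_drain_stat(atomic64_t *stat)
 *	{
 *		return raw_atomic64_xchg(stat, 0);
 *	}
 */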

/**
 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
	return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
	return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

/**
 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}
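
/*
 * Illustrative usage sketch (not part of the generated API): cmpxchg() only
 * stores @new when the current value equals @old, and always returns the value
 * it observed, so the first caller to pass the comparison wins. Hypothetical
 * one-shot recording of a first-seen timestamp (0 meaning unset):
 *
 *	static inline void example_set_first_seen(atomic64_t *ts, s64 now)
 *	{
 *		raw_atomic64_cmpxchg(ts, 0, now);
 *	}
 */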

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
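
/*
 * Illustrative usage sketch (not part of the generated API): try_cmpxchg() is
 * intended for retry loops; on failure it writes the observed value back into
 * *@old, so the loop does not need to re-read @v. A hypothetical bounded
 * increment built on top of it:
 *
 *	static inline bool example_inc_below(atomic64_t *v, s64 limit)
 *	{
 *		s64 old = raw_atomic64_read(v);
 *
 *		do {
 *			if (old >= limit)
 *				return false;
 *		} while (!raw_atomic64_try_cmpxchg(v, &old, old + 1));
 *
 *		return true;
 *	}
 */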

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}
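
/*
 * Illustrative usage sketch (not part of the generated API): dec_and_test() is
 * the usual building block for reference counting; its full ordering ensures
 * that prior accesses to the object are complete before the thread that sees
 * the count reach zero frees it. Hypothetical put path (struct example_obj and
 * example_obj_free() are made up for this sketch):
 *
 *	static inline void example_obj_put(struct example_obj *obj)
 *	{
 *		if (raw_atomic64_dec_and_test(&obj->refcnt))
 *			example_obj_free(obj);
 *	}
 */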

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}

/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}
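
/*
 * Illustrative usage sketch (not part of the generated API): add_negative()
 * applies the addition and reports the sign of the result in a single atomic
 * step. Hypothetical charge helper for a signed budget, returning @true once
 * the budget has been driven below zero:
 *
 *	static inline bool example_charge(atomic64_t *budget, s64 cost)
 *	{
 *		return raw_atomic64_add_negative(-cost, budget);
 *	}
 */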

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}

/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}
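
/*
 * Illustrative usage sketch (not part of the generated API): inc_not_zero() is
 * the classic way to take a reference on an object whose count may already
 * have dropped to zero, e.g. during a lockless lookup; a @false return means
 * the object is on its way out and must not be used. Hypothetical tryget
 * helper (struct example_obj is made up for this sketch):
 *
 *	static inline bool example_obj_tryget(struct example_obj *obj)
 *	{
 *		return raw_atomic64_inc_not_zero(&obj->refcnt);
 *	}
 */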

/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}
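
/*
 * Illustrative usage sketch (not part of the generated API): dec_if_positive()
 * only consumes from @v while it is strictly positive, and its return value
 * (the old value minus one) is negative exactly when nothing was consumed.
 * Hypothetical permit-style helper:
 *
 *	static inline bool example_take_permit(atomic64_t *permits)
 *	{
 *		return raw_atomic64_dec_if_positive(permits) >= 0;
 *	}
 */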

#endif /* _LINUX_ATOMIC_FALLBACK_H */
// eec048affea735b8464f58e6d96992101f8f85f1