/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Taken from Linux commit 219d54332a09 ("Linux 5.4"), from the file
 * tools/include/linux/compiler.h, with minor changes.
 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#define GCC_VERSION (__GNUC__ * 10000           \
		     + __GNUC_MINOR__ * 100     \
		     + __GNUC_PATCHLEVEL__)

#ifdef __clang__
#if __has_builtin(__builtin_add_overflow) && \
    __has_builtin(__builtin_sub_overflow) && \
    __has_builtin(__builtin_mul_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) ({			\
	typeof((a) + (b)) __d;				\
	__builtin_add_overflow(a, b, &__d);		\
})
#define check_sub_overflow(a, b) ({			\
	typeof((a) - (b)) __d;				\
	__builtin_sub_overflow(a, b, &__d);		\
})
#define check_mul_overflow(a, b) ({			\
	typeof((a) * (b)) __d;				\
	__builtin_mul_overflow(a, b, &__d);		\
})
#endif
#elif GCC_VERSION >= 70100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) __builtin_add_overflow_p(a, b, (typeof((a) + (b)))0)
#define check_sub_overflow(a, b) __builtin_sub_overflow_p(a, b, (typeof((a) - (b)))0)
#define check_mul_overflow(a, b) __builtin_mul_overflow_p(a, b, (typeof((a) * (b)))0)
#else
/* No compiler support for overflow checking; evaluate the operands but never report overflow. */
#define check_add_overflow(a, b) ({ (void)(a); (void)(b); 0; })
#define check_sub_overflow(a, b) ({ (void)(a); (void)(b); 0; })
#define check_mul_overflow(a, b) ({ (void)(a); (void)(b); 0; })
#endif
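
/*
 * Usage sketch (illustrative, not part of the imported header): each
 * check_*_overflow() macro evaluates its operands and yields a non-zero
 * value when the operation would overflow the promoted result type, and
 * 0 otherwise (assuming the compiler provides the overflow builtins; the
 * fallback above never reports overflow).
 *
 *	unsigned int a = -1u, b = 1;
 *
 *	if (check_add_overflow(a, b))
 *		handle_wraparound();	// hypothetical caller-side helper
 */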

#include <stdint.h>

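/*
 * Compiler-only barrier: it stops the compiler from caching values in
 * registers or reordering memory accesses across it, but emits no CPU
 * fence instruction.
 */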
#define barrier()	asm volatile("" : : : "memory")

#define __always_inline	inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
#define __unused __attribute__((__unused__))

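/*
 * Copy @size bytes from @p to @res.  Sizes of 1, 2, 4 and 8 bytes are done
 * with a single volatile load; anything else falls back to a memcpy()
 * bracketed by compiler barriers.
 */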
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t *)res = *(volatile uint8_t *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE()
 * fall back to a memcpy() bracketed by compiler barriers.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

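/*
 * Counterpart of __read_once_size(): copy @size bytes from @res to @p,
 * using a single volatile store for sizes of 1, 2, 4 and 8 bytes and a
 * barrier()-bracketed memcpy() otherwise.
 */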
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile uint8_t *)p = *(uint8_t *)res; break;
	case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
	case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
	case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
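
/*
 * Usage sketch (illustrative, not part of the imported header), covering
 * use case (1) above: sharing a flag between regular code and an interrupt
 * handler on the same CPU.  The function names are hypothetical.
 *
 *	static int irq_fired;
 *
 *	void irq_handler(void)
 *	{
 *		WRITE_ONCE(irq_fired, 1);
 *	}
 *
 *	void wait_for_irq(void)
 *	{
 *		while (!READ_ONCE(irq_fired))
 *			;
 *	}
 *
 * The macros only keep the compiler from tearing, caching or refetching
 * the accesses; ordering against other CPUs still needs explicit barriers
 * or atomics.
 */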

#endif /* !__ASSEMBLY__ */
#endif /* !__LINUX_COMPILER_H */