/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Taken from Linux commit 219d54332a09 ("Linux 5.4"), from the file
 * tools/include/linux/compiler.h, with minor changes.
 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#define GCC_VERSION (__GNUC__ * 10000 \
		     + __GNUC_MINOR__ * 100 \
		     + __GNUC_PATCHLEVEL__)

#ifdef __clang__
#if __has_builtin(__builtin_add_overflow) && \
    __has_builtin(__builtin_sub_overflow) && \
    __has_builtin(__builtin_mul_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) ({ \
	typeof((a) + (b)) __d; \
	__builtin_add_overflow(a, b, &__d); \
})
#define check_sub_overflow(a, b) ({ \
	typeof((a) - (b)) __d; \
	__builtin_sub_overflow(a, b, &__d); \
})
#define check_mul_overflow(a, b) ({ \
	typeof((a) * (b)) __d; \
	__builtin_mul_overflow(a, b, &__d); \
})
#endif
#elif GCC_VERSION >= 70100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) __builtin_add_overflow_p(a, b, (typeof((a) + (b)))0)
#define check_sub_overflow(a, b) __builtin_sub_overflow_p(a, b, (typeof((a) - (b)))0)
#define check_mul_overflow(a, b) __builtin_mul_overflow_p(a, b, (typeof((a) * (b)))0)
#else
#define check_add_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#define check_sub_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#define check_mul_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#endif

#include <stdint.h>

#define barrier() asm volatile("" : : : "memory")

/*
 * As glibc's sys/cdefs.h does, this undefines __always_inline because
 * Linux's stddef.h kernel header also defines it in an incompatible
 * way.
 */
#undef __always_inline
#define __always_inline __inline __attribute__ ((__always_inline__))

#define noinline __attribute__((noinline))
#define __unused __attribute__((__unused__))

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t *)res = *(volatile uint8_t *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy and print a compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
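 *
 * As an illustrative sketch of use case (1) (this paragraph is an
 * addition, not taken from the Linux comment), a flag shared between
 * process-level code and an interrupt handler on the same CPU might be
 * accessed as follows; "done", handler() and wait_for_handler() exist
 * only in this example:
 *
 *	static int done;
 *
 *	// runs in irq context: the store cannot be elided or torn
 *	void handler(void)
 *	{
 *		WRITE_ONCE(done, 1);
 *	}
 *
 *	// runs in process context: each iteration performs a fresh load
 *	void wait_for_handler(void)
 *	{
 *		while (!READ_ONCE(done))
 *			;
 *	}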
 */

#define READ_ONCE(x) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = \
		{ .__c = { 0 } }; \
	__read_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile uint8_t *)p = *(uint8_t *)res; break;
	case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
	case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
	case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define WRITE_ONCE(x, val) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = \
		{ .__val = (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})

#endif /* !__ASSEMBLY__ */
#endif /* !__LINUX_COMPILER_H */
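
/*
 * Illustrative usage of the overflow-check helpers defined above (this
 * comment is an addition, not part of the original header; "a", "b" and
 * "sum" are assumed local variables). With compilers that provide the
 * overflow builtins, each check_*_overflow() macro evaluates to nonzero
 * when the operation would overflow the type of (a) op (b); the fallback
 * for older GCC always evaluates to 0:
 *
 *	unsigned int a, b, sum;
 *
 *	if (check_add_overflow(a, b))
 *		return -1;	// would overflow, refuse the operation
 *	sum = a + b;
 */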