/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Taken from Linux commit 219d54332a09 ("Linux 5.4"), from the file
 * tools/include/linux/compiler.h, with minor changes.
 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#define GCC_VERSION (__GNUC__ * 10000 \
		     + __GNUC_MINOR__ * 100 \
		     + __GNUC_PATCHLEVEL__)

/*
 * check_{add,sub,mul}_overflow(a, b) evaluate to non-zero if the operation
 * would overflow typeof((a) OP (b)); the result of the operation itself is
 * discarded.
 */
#ifdef __clang__
#if __has_builtin(__builtin_add_overflow) && \
    __has_builtin(__builtin_sub_overflow) && \
    __has_builtin(__builtin_mul_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) ({		\
	typeof((a) + (b)) __d;			\
	__builtin_add_overflow(a, b, &__d);	\
})
#define check_sub_overflow(a, b) ({		\
	typeof((a) - (b)) __d;			\
	__builtin_sub_overflow(a, b, &__d);	\
})
#define check_mul_overflow(a, b) ({		\
	typeof((a) * (b)) __d;			\
	__builtin_mul_overflow(a, b, &__d);	\
})
#endif
#elif GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#define check_add_overflow(a, b) ({		\
	typeof((a) + (b)) __d;			\
	__builtin_add_overflow(a, b, &__d);	\
})
#define check_sub_overflow(a, b) ({		\
	typeof((a) - (b)) __d;			\
	__builtin_sub_overflow(a, b, &__d);	\
})
#define check_mul_overflow(a, b) ({		\
	typeof((a) * (b)) __d;			\
	__builtin_mul_overflow(a, b, &__d);	\
})
#else
/*
 * No overflow builtins available: the fallbacks evaluate their arguments
 * but always report "no overflow".
 */
#define check_add_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#define check_sub_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#define check_mul_overflow(a, b) ({ (void)((int)(a) == (int)(b)); 0; })
#endif

#include <stdint.h>

#define barrier() asm volatile("" : : : "memory")

#define __always_inline inline __attribute__((always_inline))

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t *)res = *(volatile uint8_t *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy().
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
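/*
 * Illustrative sketch (not part of the original Linux header): a lockless
 * flag shared between process-level code and an interrupt handler on the
 * same CPU. The names (hypothetical_flag, producer, consumer) are made up
 * for this example.
 *
 *	static int hypothetical_flag;
 *
 *	void producer(void)		// e.g. called from an irq handler
 *	{
 *		WRITE_ONCE(hypothetical_flag, 1);
 *	}
 *
 *	int consumer(void)		// e.g. polled from process context
 *	{
 *		// Without READ_ONCE() the compiler may hoist the load out
 *		// of a polling loop and never observe the update.
 *		return READ_ONCE(hypothetical_flag);
 *	}
 */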
#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile uint8_t *)p = *(uint8_t *)res; break;
	case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
	case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
	case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#endif /* !__ASSEMBLY__ */
#endif /* !__LINUX_COMPILER_H */