/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

#include <linux/types.h>

#include "portability_macros.h"

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#define FORCE_INLINE_ATTR __attribute__((always_inline))

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/*
  On MSVC, qsort requires that functions passed into it use the __cdecl calling convention (CC).
  This explicitly marks such functions as __cdecl so that the code will still compile
  if a CC other than __cdecl has been made the default.
*/
#define WIN_CDECL

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#define UNUSED_ATTR __attribute__((unused))

/*
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
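
/* Illustrative sketch, not part of zstd: a hypothetical "template" function
 * parameterized on a compile-time constant. Every caller passes a literal for
 * `use32`, so forced inlining lets the compiler delete the dead branch:
 *
 *   FORCE_INLINE_TEMPLATE size_t sum_generic(const u8* src, size_t n, int use32)
 *   {
 *       size_t total = 0, i;
 *       if (use32) {
 *           for (i = 0; i + 4 <= n; i += 4)
 *               total += (size_t)src[i] + src[i+1] + src[i+2] + src[i+3];
 *           for (; i < n; ++i) total += src[i];
 *       } else {
 *           for (i = 0; i < n; ++i) total += src[i];
 *       }
 *       return total;
 *   }
 *   static size_t sum_scalar(const u8* s, size_t n)   { return sum_generic(s, n, 0); }
 *   static size_t sum_unrolled(const u8* s, size_t n) { return sum_generic(s, n, 1); }
 */
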
/*
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compiler's
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE FORCE_INLINE_TEMPLATE
#endif

73 /* "soft" inline :
74  * The compiler is free to select if it's a good idea to inline or not.
75  * The main objective is to silence compiler warnings
76  * when a defined function in included but not used.
77  *
78  * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit.
79  * Updating the prefix is probably preferable, but requires a fairly large codemod,
80  * since this name is used everywhere.
81  */
82 #ifndef MEM_STATIC  /* already defined in Linux Kernel mem.h */
83 #define MEM_STATIC static __inline UNUSED_ATTR
84 #endif
85 
/* force no inlining */
#define FORCE_NOINLINE static __attribute__((__noinline__))


/* target attribute */
#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))

/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1.
 */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
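
/* Illustrative sketch, not taken from zstd itself: the dynamic-dispatch pattern
 * this attribute supports. A hypothetical hot function is compiled twice, once
 * with BMI2 code generation enabled, and the caller selects a variant at
 * runtime based on a CPU-feature check:
 *
 *   static size_t doWork_default(const u8* src, size_t n)
 *   {
 *       size_t total = 0, i;
 *       for (i = 0; i < n; ++i) total += src[i];
 *       return total;
 *   }
 *   BMI2_TARGET_ATTRIBUTE
 *   static size_t doWork_bmi2(const u8* src, size_t n)
 *   {
 *       size_t total = 0, i;
 *       for (i = 0; i < n; ++i) total += src[i];
 *       return total;
 *   }
 *   static size_t doWork(const u8* src, size_t n, int bmi2)
 *   {
 *       return bmi2 ? doWork_bmi2(src, n) : doWork_default(src, n);
 *   }
 */
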

/* prefetch
 * can be disabled by declaring the NO_PREFETCH build macro */
#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#elif defined(__aarch64__)
#  define PREFETCH_L1(ptr)  do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0)
#  define PREFETCH_L2(ptr)  do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0)
#else
#  define PREFETCH_L1(ptr) do { (void)(ptr); } while (0)  /* disabled */
#  define PREFETCH_L2(ptr) do { (void)(ptr); } while (0)  /* disabled */
#endif  /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s)                              \
    do {                                                 \
        const char* const _ptr = (const char*)(p);       \
        size_t const _size = (size_t)(s);                \
        size_t _pos;                                     \
        for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
            PREFETCH_L2(_ptr + _pos);                    \
        }                                                \
    } while (0)
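
/* Illustrative usage sketch (hypothetical function and buffer names): warm a
 * dictionary buffer before a search loop walks it, issuing one L2 prefetch per
 * cache line:
 *
 *   static void example_warmDictionary(const void* dictBuffer, size_t dictSize)
 *   {
 *       PREFETCH_AREA(dictBuffer, dictSize);
 *   }
 */
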

/* vectorization
 * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
 * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
#  else
#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
#  endif
#else
#  define DONT_VECTORIZE
#endif
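
/* Illustrative usage sketch (hypothetical copy loop): keep GCC's auto-vectorizer
 * away from a loop whose scalar form is already the intended code shape:
 *
 *   DONT_VECTORIZE
 *   static void example_byteCopy(u8* dst, const u8* src, size_t n)
 *   {
 *       size_t i;
 *       for (i = 0; i < n; ++i) dst[i] = src[i];
 *   }
 */
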

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if they cause the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do.
 */
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
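
/* Illustrative usage sketch (hypothetical bounds check): annotate the rare
 * error path so the common path stays straight-line. As noted above, keep such
 * hints only when they measurably help:
 *
 *   static size_t example_copy(u8* dst, size_t dstCapacity, const u8* src, size_t srcSize)
 *   {
 *       size_t i;
 *       if (UNLIKELY(srcSize > dstCapacity)) return 0;   // rare failure path
 *       for (i = 0; i < srcSize; ++i) dst[i] = src[i];
 *       return srcSize;
 *   }
 */
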

#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
#  define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0)
#else
#  define ZSTD_UNREACHABLE do { assert(0); } while (0)
#endif
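
/* Illustrative usage sketch (hypothetical dispatch): document a case that the
 * caller guarantees cannot happen, so the compiler can drop the dead path:
 *
 *   switch (strategy) {
 *   case 1: return run_fast();
 *   case 2: return run_strong();
 *   default: ZSTD_UNREACHABLE;    // callers only ever pass 1 or 2
 *   }
 */
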

/* disable warnings */

/* compile time determination of SIMD support */

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif

/* Only use C++ attributes in C++. Some compilers report support for C++
 * attributes when compiling with C.
 */
#define ZSTD_HAS_CPP_ATTRIBUTE(x) 0

/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
 * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
 * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * - Else: __attribute__((__fallthrough__))
 */
#define ZSTD_FALLTHROUGH fallthrough
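
/* Illustrative usage sketch (hypothetical byte accumulation): mark intentional
 * fall-through between cases so -Wimplicit-fallthrough stays quiet:
 *
 *   switch (nbBytes) {
 *   case 3: acc = (acc << 8) + src[2];
 *           ZSTD_FALLTHROUGH;
 *   case 2: acc = (acc << 8) + src[1];
 *           ZSTD_FALLTHROUGH;
 *   case 1: acc = (acc << 8) + src[0];
 *           break;
 *   default: break;
 *   }
 */
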

/*-**************************************************************
*  Alignment
*****************************************************************/

/* @return 1 if @u is a power of 2, 0 otherwise (note: it also returns 1 for @u == 0).
 * Useful to check that a value satisfies alignment restrictions. */
MEM_STATIC int ZSTD_isPower2(size_t u) {
    return (u & (u-1)) == 0;
}
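
/* Illustrative usage sketch (hypothetical helper): validate an alignment
 * argument before rounding a size up to it:
 *
 *   static size_t example_alignUp(size_t size, size_t alignment)
 *   {
 *       assert(ZSTD_isPower2(alignment));
 *       return (size + alignment - 1) & ~(alignment - 1);
 *   }
 */
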

/* this test was initially positioned in mem.h,
 * but that file is removed (or replaced) for the Linux kernel,
 * so it's now hosted in compiler.h,
 * which remains valid for both user & kernel spaces.
 */

#ifndef ZSTD_ALIGNOF
/* covers gcc, clang & MSVC */
/* note : this section must come first, before C11,
 * due to a limitation in the kernel source generator */
#  define ZSTD_ALIGNOF(T) __alignof(T)

#endif /* ZSTD_ALIGNOF */

#ifndef ZSTD_ALIGNED
/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */
#define ZSTD_ALIGNED(a) __attribute__((aligned(a)))
#endif /* ZSTD_ALIGNED */
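
/* Illustrative usage sketch (hypothetical table and struct): place a hot
 * lookup table on a cache-line boundary, and query a type's alignment with
 * ZSTD_ALIGNOF:
 *
 *   static ZSTD_ALIGNED(64) u32 exampleTable[256];
 *   typedef struct { u64 a; u8 b; } example_t;
 *   static const size_t exampleAlignment = ZSTD_ALIGNOF(example_t);
 */
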


/*-**************************************************************
*  Sanitizer
*****************************************************************/

/*
 * Zstd relies on pointer overflow in its decompressor.
 * We add this attribute to functions that rely on pointer overflow.
 */
#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
#  if __has_attribute(no_sanitize)
#    if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
       /* gcc < 8 only has signed-integer-overflow, which triggers on pointer overflow */
#      define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
#    else
       /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
#      define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
#    endif
#  else
#    define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
#  endif
#endif

/*
 * Helper function to perform a wrapped pointer difference without triggering
 * UBSAN.
 *
 * @returns lhs - rhs with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
{
    return lhs - rhs;
}
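
/* Illustrative usage sketch (hypothetical pointer names): measure how far an
 * output pointer sits from a prefix start, even when the subtraction would
 * wrap, without UBSAN flagging it:
 *
 *   ptrdiff_t const distance = ZSTD_wrappedPtrDiff(op, prefixStart);
 */
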

/*
 * Helper function to perform a wrapped pointer add without triggering UBSAN.
 *
 * @return ptr + add with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
{
    return ptr + add;
}

/*
 * Helper function to perform a wrapped pointer subtraction without triggering
 * UBSAN.
 *
 * @return ptr - sub with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
{
    return ptr - sub;
}

/*
 * Helper function to add to a pointer that works around C's undefined behavior
 * of adding 0 to NULL.
 *
 * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
 */
MEM_STATIC
unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
{
    return add > 0 ? ptr + add : ptr;
}
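
/* Illustrative usage sketch (hypothetical buffer names): advance an output
 * pointer that may legitimately be NULL when the destination capacity is 0,
 * without tripping UBSAN's NULL-plus-zero check:
 *
 *   u8* op = (u8*)dstBuffer;    // may be NULL when dstCapacity == 0
 *   op = ZSTD_maybeNullPtrAdd(op, (ptrdiff_t)bytesWritten);
 */
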

/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
 * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif



#endif /* ZSTD_COMPILER_H */