1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
2 /*
3 * Copyright (c) Meta Platforms, Inc. and affiliates.
4 * All rights reserved.
5 *
6 * This source code is licensed under both the BSD-style license (found in the
7 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
8 * in the COPYING file in the root directory of this source tree).
9 * You may select, at your option, one of the above-listed licenses.
10 */
11
12 #ifndef ZSTD_CCOMMON_H_MODULE
13 #define ZSTD_CCOMMON_H_MODULE
14
15 /* this module contains definitions which must be identical
16 * across compression, decompression and dictBuilder.
17 * It also contains a few functions useful to at least 2 of them
18 * and which benefit from being inlined */
19
20 /*-*************************************
21 * Dependencies
22 ***************************************/
23 #include "compiler.h"
24 #include "cpu.h"
25 #include "mem.h"
26 #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
27 #include "error_private.h"
28 #define ZSTD_STATIC_LINKING_ONLY
29 #include "../zstd.h"
30 #define FSE_STATIC_LINKING_ONLY
31 #include "fse.h"
32 #include "huf.h"
33 #ifndef XXH_STATIC_LINKING_ONLY
34 # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
35 #endif
36 #include "xxhash.h" /* XXH_reset, update, digest */
37 #ifndef ZSTD_NO_TRACE
38 # include "zstd_trace.h"
39 #else
40 # define ZSTD_TRACE 0
41 #endif
42
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
/* All three sub-libraries share the same error-code convention,
 * so their error checks all alias the common ERR_isError helper. */
#define ZSTD_isError ERR_isError /* for inlining */
#define FSE_isError ERR_isError
#define HUF_isError ERR_isError


/*-*************************************
* shared macros
***************************************/
/* NOTE: classic function-like macros — arguments may be evaluated more
 * than once; do not pass expressions with side effects. */
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/* clamp `val` into the inclusive range [min, max] */
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
58
59
/*-*************************************
* Common constants
***************************************/
#define ZSTD_OPT_NUM (1<<12)   /* cap on nb of positions tracked by the optimal parser */

#define ZSTD_REP_NUM 3 /* number of repcodes */
/* starting repcode values mandated by the zstd frame format */
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

/* size suffixes : usage e.g. `4 KB`, `2 MB` */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

/* single-bit masks (BIT2/BIT3 intentionally absent — unused) */
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10   /* smallest window log the format accepts */
/* possible sizes, in bytes, of the frame-content-size and dictID header
 * fields, indexed by their 2-bit frame-header descriptor codes */
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4 /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
/* 2-bit block type stored in every block header */
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4   /* optional XXH64-based frame checksum, truncated to 4 bytes */

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6

/* how a symbol table (literal lengths, match lengths, offsets) is transmitted */
typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;

#define LONGNBSEQ 0x7F00   /* nbSeq values >= this need the long (3-byte) header form */

#define MINMATCH 3   /* smallest match length representable by the format */

#define Litbits 8
#define LitHufLog 11
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52                  /* max match-length code */
#define MaxLL 35                  /* max literal-length code */
#define DefaultMaxOff 28          /* max offset code covered by the default norm table */
#define MaxOff 31                 /* max offset code */
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define MaxMLBits 16
#define MaxLLBits 16

#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
119
/* number of extra bits carried by each literal-length code */
static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
/* default FSE normalized distribution for literal lengths;
 * -1 marks a "less than one" (low-probability) symbol */
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6 /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

/* number of extra bits carried by each match-length code */
static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
/* default FSE normalized distribution for match lengths */
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6 /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

/* default FSE normalized distribution for offset codes
 * (only covers codes up to DefaultMaxOff) */
static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5 /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
166
167
168 /*-*******************************************
169 * Shared functions to include for inlining
170 *********************************************/
/* Copy exactly 8 bytes; dst and src must not overlap. */
static void ZSTD_copy8(void* dst, const void* src)
{
#if defined(ZSTD_ARCH_ARM_NEON)
    /* one 64-bit NEON lane: unaligned load then unaligned store */
    uint8x8_t const chunk = vld1_u8((const uint8_t*)src);
    vst1_u8((uint8_t*)dst, chunk);
#else
    ZSTD_memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
179
180 /* Need to use memmove here since the literal buffer can now be located within
181 the dst buffer. In circumstances where the op "catches up" to where the
182 literal buffer is, there can be partial overlaps in this call on the final
183 copy if the literal is being shifted by less than 16 bytes. */
ZSTD_copy16(void * dst,const void * src)184 static void ZSTD_copy16(void* dst, const void* src) {
185 #if defined(ZSTD_ARCH_ARM_NEON)
186 vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
187 #elif defined(ZSTD_ARCH_X86_SSE2)
188 _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
189 #elif defined(__clang__)
190 ZSTD_memmove(dst, src, 16);
191 #else
192 /* ZSTD_memmove is not inlined properly by gcc */
193 BYTE copy16_buf[16];
194 ZSTD_memcpy(copy16_buf, src, 16);
195 ZSTD_memcpy(dst, copy16_buf, 16);
196 #endif
197 }
198 #define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
199
/* ZSTD_wildcopy callers must guarantee this much slack past the true end
 * of dst, because the copy proceeds in full 8/16-byte vectors. */
#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies: with dst less than 16 bytes ahead of
         * src, a 16-byte vector copy would read bytes it has not yet
         * written, so fall back to 8-byte steps (caller guarantees the
         * offset is still >= 8). */
        do {
            COPY8(op, ip);
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
         */
        ZSTD_copy16(op, ip);
        if (16 >= length) return;
        op += 16;
        ip += 16;
        /* Unrolled x2: may overshoot oend by up to 31 bytes, covered by
         * the WILDCOPY_OVERLENGTH slack contract. */
        do {
            COPY16(op, ip);
            COPY16(op, ip);
        }
        while (op < oend);
    }
}
248
ZSTD_limitCopy(void * dst,size_t dstCapacity,const void * src,size_t srcSize)249 MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
250 {
251 size_t const length = MIN(dstCapacity, srcSize);
252 if (length > 0) {
253 ZSTD_memcpy(dst, src, length);
254 }
255 return length;
256 }
257
/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* when workspace is continuously too large
 * during at least this number of times,
 * context's memory usage is considered wasteful,
 * because it's sized to handle a worst case scenario which rarely happens.
 * In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable. */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;
273
274
275 /*-*******************************************
276 * Private declarations
277 *********************************************/
278
/**
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 * similarly, before using `decompressedBound`, check for errors using:
 * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t nbBlocks;                     /* number of blocks in the frame */
    size_t compressedSize;               /* total compressed size, or an error code */
    unsigned long long decompressedBound;/* upper bound on decompressed size, or ZSTD_CONTENTSIZE_ERROR */
} ZSTD_frameSizeInfo; /* decompress & legacy */

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 * do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */


/* Decoded form of a block header. */
typedef struct {
    blockType_e blockType;  /* raw / rle / compressed / reserved */
    U32 lastBlock;          /* non-zero when this is the frame's final block */
    U32 origSize;           /* size encoded in the header */
} blockProperties_t; /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: zstd_decompress_block, fullbench */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize);
315
316 /**
317 * @returns true iff the CPU supports dynamic BMI2 dispatch.
318 */
ZSTD_cpuSupportsBmi2(void)319 MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
320 {
321 ZSTD_cpuid_t cpuid = ZSTD_cpuid();
322 return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
323 }
324
325 #endif /* ZSTD_CCOMMON_H_MODULE */
326