// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
#include "../common/error_private.h"
#include "hist.h"           /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
#include "../common/fse.h"
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
#include "../common/bits.h"      /* ZSTD_highbit32, ZSTD_rotateRight_U64 */

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * COMPRESS_HEAPMODE :
 * Select how the default compression function ZSTD_compress() allocates its context,
 * on stack (0, default), or into heap (1).
 * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
 */

/*!
 * ZSTD_HASHLOG3_MAX :
 * Maximum size of the hash table dedicated to finding 3-byte matches,
 * in log format, aka 17 => 1 << 17 == 128Ki positions.
 * This structure is only used in zstd_opt.
 * Since allocation is centralized for all strategies, it has to be known here.
 * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3,
 * so that zstd_opt.c doesn't need to know about this constant.
 */
#ifndef ZSTD_HASHLOG3_MAX
#  define ZSTD_HASHLOG3_MAX 17
#endif

/*-*************************************
*  Helper functions
***************************************/
/* ZSTD_compressBound()
 * Note that the result from this function is only valid for
 * the one-pass compression functions.
 * When employing the streaming mode,
 * if flushes are frequently altering the size of blocks,
 * the overhead from block headers can make the compressed data larger
 * than the return value of ZSTD_compressBound().
 */
size_t ZSTD_compressBound(size_t srcSize) {
    size_t const r = ZSTD_COMPRESSBOUND(srcSize);
    if (r==0) return ERROR(srcSize_wrong);
    return r;
}
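/* A minimal one-pass usage sketch (illustrative only, not part of this file):
 * allocate the worst-case destination size up front, then compress in one call.
 *
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void* const dst = malloc(dstCapacity);
 *     size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
 *     if (ZSTD_isError(cSize)) { ... }   // handle error
 */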
/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CDict_s {
    const void* dictContent;
    size_t dictContentSize;
    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
    ZSTD_cwksp workspace;
    ZSTD_MatchState_t matchState;
    ZSTD_compressedBlockState_t cBlockState;
    ZSTD_customMem customMem;
    U32 dictID;
    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
    ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
                                           * row-based matchfinder. Unless the cdict is reloaded, we will use
                                           * the same greedy/lazy matchfinder at compression time.
                                           */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */

ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}

static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    ZSTD_memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuSupportsBmi2();
    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
    ZSTD_STATIC_ASSERT(zcss_init==0);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
        if (!cctx) return NULL;
        ZSTD_initCCtx(cctx, customMem);
        return cctx;
    }
}

ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
    ZSTD_cwksp ws;
    ZSTD_CCtx* cctx;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);

    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
    if (cctx == NULL) return NULL;

    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
    ZSTD_cwksp_move(&cctx->workspace, &ws);
    cctx->staticSize = workspaceSize;

    /* statically sized space. tmpWorkspace never moves (but prev/next block swap places) */
    if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE);
    cctx->tmpWkspSize = TMP_WORKSPACE_SIZE;
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
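/* Illustrative sketch of the static-allocation path (assumes the caller sizes
 * the buffer with ZSTD_estimateCCtxSize(); not part of this file):
 *
 *     size_t const wkspSize = ZSTD_estimateCCtxSize(3);
 *     void* const wksp = malloc(wkspSize);        // any 8-aligned buffer works
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
 *     // cctx lives inside wksp; never free cctx itself, free wksp instead
 */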
/*
 * Clears and frees all of the dictionaries in the CCtx.
 */
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
    ZSTD_freeCDict(cctx->localDict.cdict);
    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
    cctx->cdict = NULL;
}

static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
{
    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
    return bufferSize + cdictSize;
}

static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);
    ZSTD_clearAllDicts(cctx);
    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}

size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);
    if (cctx==NULL) return 0;   /* support free on NULL */
    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                    "not compatible with static CCtx");
    {   int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
        ZSTD_freeCCtxContent(cctx);
        if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem);
    }
    return 0;
}


static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
    (void)cctx;
    return 0;
}


size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support sizeof on NULL */
    /* cctx may be in the workspace */
    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
           + ZSTD_cwksp_sizeof(&cctx->workspace)
           + ZSTD_sizeof_localDict(cctx->localDict)
           + ZSTD_sizeof_mtctx(cctx);
}

size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
    return ZSTD_sizeof_CCtx(zcs);  /* same object */
}

/* private API call, for dictBuilder only */
const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }

/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
    return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
}

/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
 * for this compression.
 */
static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) {
    assert(mode != ZSTD_ps_auto);
    return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}

/* Returns row matchfinder usage given an initial mode and cParams */
static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode,
                                                         const ZSTD_compressionParameters* const cParams) {
    /* The Linux Kernel does not use SIMD, and 128KB is a very common size, e.g. in BtrFS.
     * The row match finder is slower for this size without SIMD, so disable it.
     */
    const unsigned kWindowLogLowerBound = 17;
    if (mode != ZSTD_ps_auto) return mode;   /* if requested explicitly, honor the request : the row matchfinder is used even without SIMD */
    mode = ZSTD_ps_disable;
    if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
    if (cParams->windowLog > kWindowLogLowerBound) mode = ZSTD_ps_enable;
    return mode;
}

/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode,
                                                        const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
}

/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
                                   const ZSTD_ParamSwitch_e useRowMatchFinder,
                                   const U32 forDDSDict) {
    assert(useRowMatchFinder != ZSTD_ps_auto);
    /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
     * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
     */
    return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}

/* Returns ZSTD_ps_enable if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt).
 * Returns ZSTD_ps_disable otherwise.
 */
static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode,
                                                const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}

static int ZSTD_resolveExternalSequenceValidation(int mode) {
    return mode;
}

/* Resolves maxBlockSize to the default if no value is present. */
static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
    if (maxBlockSize == 0) {
        return ZSTD_BLOCKSIZE_MAX;
    } else {
        return maxBlockSize;
    }
}

static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) {
    if (value != ZSTD_ps_auto) return value;
    if (cLevel < 10) {
        return ZSTD_ps_disable;
    } else {
        return ZSTD_ps_enable;
    }
}

/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged.
 * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict.
 */
static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) {
    return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast;
}

static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
        ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    /* should not matter, as all cParams are presumed properly defined */
    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
    cctxParams.cParams = cParams;

    /* Adjust advanced params according to cParams */
    cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
    if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
        assert(cctxParams.ldmParams.hashRateLog < 32);
    }
    cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams);
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
    cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
    cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
    cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes,
                                                                             cctxParams.compressionLevel);
    assert(!ZSTD_checkCParams(cParams));
    return cctxParams;
}

static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params* params;
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
            sizeof(ZSTD_CCtx_params), customMem);
    if (!params) { return NULL; }
    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
    params->customMem = customMem;
    return params;
}

ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}

size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
    if (params == NULL) { return 0; }
    ZSTD_customFree(params, params->customMem);
    return 0;
}

size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}

size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->fParams.contentSizeFlag = 1;
    return 0;
}

#define ZSTD_NO_CLEVEL 0
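/* Sketch of the intended CCtxParams lifecycle (illustrative only, not part of
 * this file):
 *
 *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *     ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
 *     // ... apply with ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params) ...
 *     ZSTD_freeCCtxParams(params);
 */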
/*
 * Initializes `cctxParams` from `params` and `compressionLevel`.
 * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
 */
static void
ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
                        const ZSTD_parameters* params,
                              int compressionLevel)
{
    assert(!ZSTD_checkCParams(params->cParams));
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
    cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &params->cParams);
    cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
    cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
    cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
    cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
    DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
             cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm);
}

size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
    return 0;
}

/*
 * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
 * @param params Validated zstd parameters.
 */
static void ZSTD_CCtxParams_setZstdParams(
        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
    assert(!ZSTD_checkCParams(params->cParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
}

ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };

    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;

    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;

    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;

    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;

    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;

    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;

    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_overlapLog:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;

    case ZSTD_c_enableDedicatedDictSearch:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;

    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_literalCompressionMode:
        ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_targetCBlockSize:
        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
        return bounds;

    case ZSTD_c_srcSizeHint:
        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
        return bounds;

    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
        bounds.lowerBound = (int)ZSTD_bm_buffered;
        bounds.upperBound = (int)ZSTD_bm_stable;
        return bounds;

    case ZSTD_c_blockDelimiters:
        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
        return bounds;

    case ZSTD_c_validateSequences:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_splitAfterSequences:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_blockSplitterLevel:
        bounds.lowerBound = 0;
        bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX;
        return bounds;

    case ZSTD_c_useRowMatchFinder:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_deterministicRefPrefix:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_prefetchCDictTables:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    case ZSTD_c_enableSeqProducerFallback:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_maxBlockSize:
        bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
        bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
        return bounds;

    case ZSTD_c_repcodeResolution:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;

    default:
        bounds.error = ERROR(parameter_unsupported);
        return bounds;
    }
}
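/* Illustrative bounds query (not part of this file): callers can validate a
 * value before setting it, e.g. for ZSTD_c_windowLog:
 *
 *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);
 *     if (!ZSTD_isError(b.error) && value >= b.lowerBound && value <= b.upperBound) {
 *         ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, value);
 *     }
 */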
/* ZSTD_cParam_clampBounds:
 * Clamps the value into the bounded range.
 */
static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return bounds.error;
    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
    if (*value > bounds.upperBound) *value = bounds.upperBound;
    return 0;
}

#define BOUNDCHECK(cParam, val)                                       \
    do {                                                              \
        RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val),        \
                        parameter_outOfBound, "Param out of bounds"); \
    } while (0)


static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_blockSplitterLevel:
        return 1;

    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_splitAfterSequences:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
    case ZSTD_c_prefetchCDictTables:
    case ZSTD_c_enableSeqProducerFallback:
    case ZSTD_c_maxBlockSize:
    case ZSTD_c_repcodeResolution:
    default:
        return 0;
    }
}

size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;
        } else {
            RETURN_ERROR(stage_wrong, "can only set params in cctx init stage");
    }   }

    switch(param)
    {
    case ZSTD_c_nbWorkers:
        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
                        "MT not compatible with static alloc");
        break;

    case ZSTD_c_compressionLevel:
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_format:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_splitAfterSequences:
    case ZSTD_c_blockSplitterLevel:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
    case ZSTD_c_prefetchCDictTables:
    case ZSTD_c_enableSeqProducerFallback:
    case ZSTD_c_maxBlockSize:
    case ZSTD_c_repcodeResolution:
        break;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}

size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
                                    ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
    switch(param)
    {
    case ZSTD_c_format :
        BOUNDCHECK(ZSTD_c_format, value);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_c_compressionLevel : {
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        if (value == 0)
            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
        else
            CCtxParams->compressionLevel = value;
        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
        return 0;  /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_c_windowLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_windowLog, value);
        CCtxParams->cParams.windowLog = (U32)value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_c_hashLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_hashLog, value);
        CCtxParams->cParams.hashLog = (U32)value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_c_chainLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_chainLog, value);
        CCtxParams->cParams.chainLog = (U32)value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_c_searchLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_searchLog, value);
        CCtxParams->cParams.searchLog = (U32)value;
        return (size_t)value;

    case ZSTD_c_minMatch :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_minMatch, value);
        CCtxParams->cParams.minMatch = (U32)value;
        return CCtxParams->cParams.minMatch;

    case ZSTD_c_targetLength :
        BOUNDCHECK(ZSTD_c_targetLength, value);
        CCtxParams->cParams.targetLength = (U32)value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_c_strategy :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_strategy, value);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_c_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value!=0));
        CCtxParams->fParams.contentSizeFlag = value != 0;
        return (size_t)CCtxParams->fParams.contentSizeFlag;

    case ZSTD_c_checksumFlag :
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value != 0;
        return (size_t)CCtxParams->fParams.checksumFlag;

    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
        CCtxParams->fParams.noDictIDFlag = !value;
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_c_forceMaxWindow :
        CCtxParams->forceWindow = (value != 0);
        return (size_t)CCtxParams->forceWindow;

    case ZSTD_c_forceAttachDict : {
        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
        BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref);
        CCtxParams->attachDictPref = pref;
        return CCtxParams->attachDictPref;
    }

    case ZSTD_c_literalCompressionMode : {
        const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value;
        BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);
        CCtxParams->literalCompressionMode = lcm;
        return CCtxParams->literalCompressionMode;
    }

    case ZSTD_c_nbWorkers :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_jobSize :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_overlapLog :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_rsyncable :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_enableDedicatedDictSearch :
        CCtxParams->enableDedicatedDictSearch = (value!=0);
        return (size_t)CCtxParams->enableDedicatedDictSearch;

    case ZSTD_c_enableLongDistanceMatching :
        BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value);
        CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value;
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_c_ldmHashLog :
        if (value!=0)   /* 0 ==> auto */
            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
        CCtxParams->ldmParams.hashLog = (U32)value;
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_c_ldmMinMatch :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
        CCtxParams->ldmParams.minMatchLength = (U32)value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_c_ldmBucketSizeLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
        CCtxParams->ldmParams.bucketSizeLog = (U32)value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_c_ldmHashRateLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
        CCtxParams->ldmParams.hashRateLog = (U32)value;
        return CCtxParams->ldmParams.hashRateLog;

    case ZSTD_c_targetCBlockSize :
        if (value!=0) {  /* 0 ==> default */
            value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);
            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
        }
        CCtxParams->targetCBlockSize = (U32)value;
        return CCtxParams->targetCBlockSize;

    case ZSTD_c_srcSizeHint :
        if (value!=0)    /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
        CCtxParams->srcSizeHint = value;
        return (size_t)CCtxParams->srcSizeHint;

    case ZSTD_c_stableInBuffer:
        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->inBufferMode;

    case ZSTD_c_stableOutBuffer:
        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->outBufferMode;

    case ZSTD_c_blockDelimiters:
        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
        CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value;
        return CCtxParams->blockDelimiters;

    case ZSTD_c_validateSequences:
        BOUNDCHECK(ZSTD_c_validateSequences, value);
        CCtxParams->validateSequences = value;
        return (size_t)CCtxParams->validateSequences;

    case ZSTD_c_splitAfterSequences:
        BOUNDCHECK(ZSTD_c_splitAfterSequences, value);
        CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value;
        return CCtxParams->postBlockSplitter;

    case ZSTD_c_blockSplitterLevel:
        BOUNDCHECK(ZSTD_c_blockSplitterLevel, value);
        CCtxParams->preBlockSplitter_level = value;
        return (size_t)CCtxParams->preBlockSplitter_level;

    case ZSTD_c_useRowMatchFinder:
        BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
        CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value;
        return CCtxParams->useRowMatchFinder;

    case ZSTD_c_deterministicRefPrefix:
        BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
        CCtxParams->deterministicRefPrefix = !!value;
        return (size_t)CCtxParams->deterministicRefPrefix;

    case ZSTD_c_prefetchCDictTables:
        BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
        CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value;
        return CCtxParams->prefetchCDictTables;

    case ZSTD_c_enableSeqProducerFallback:
        BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
        CCtxParams->enableMatchFinderFallback = value;
        return (size_t)CCtxParams->enableMatchFinderFallback;

    case ZSTD_c_maxBlockSize:
        if (value!=0)    /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_maxBlockSize, value);
        assert(value>=0);
        CCtxParams->maxBlockSize = (size_t)value;
        return CCtxParams->maxBlockSize;

    case ZSTD_c_repcodeResolution:
        BOUNDCHECK(ZSTD_c_repcodeResolution, value);
        CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value;
        return CCtxParams->searchForExternalRepcodes;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
}

size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}

size_t ZSTD_CCtxParams_getParameter(
        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format :
        *value = (int)CCtxParams->format;
        break;
    case ZSTD_c_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_c_windowLog :
        *value = (int)CCtxParams->cParams.windowLog;
        break;
    case ZSTD_c_hashLog :
        *value = (int)CCtxParams->cParams.hashLog;
        break;
    case ZSTD_c_chainLog :
        *value = (int)CCtxParams->cParams.chainLog;
        break;
    case ZSTD_c_searchLog :
        *value = (int)CCtxParams->cParams.searchLog;
        break;
    case ZSTD_c_minMatch :
        *value = (int)CCtxParams->cParams.minMatch;
        break;
    case ZSTD_c_targetLength :
        *value = (int)CCtxParams->cParams.targetLength;
        break;
    case ZSTD_c_strategy :
        *value = (int)CCtxParams->cParams.strategy;
        break;
    case ZSTD_c_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_c_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_c_dictIDFlag :
        *value = !CCtxParams->fParams.noDictIDFlag;
        break;
    case ZSTD_c_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_c_forceAttachDict :
        *value = (int)CCtxParams->attachDictPref;
        break;
    case ZSTD_c_literalCompressionMode :
        *value = (int)CCtxParams->literalCompressionMode;
        break;
    case ZSTD_c_nbWorkers :
        assert(CCtxParams->nbWorkers == 0);
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_overlapLog :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
with multithreading"); 1040 case ZSTD_c_rsyncable : 1041 RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); 1042 case ZSTD_c_enableDedicatedDictSearch : 1043 *value = CCtxParams->enableDedicatedDictSearch; 1044 break; 1045 case ZSTD_c_enableLongDistanceMatching : 1046 *value = (int)CCtxParams->ldmParams.enableLdm; 1047 break; 1048 case ZSTD_c_ldmHashLog : 1049 *value = (int)CCtxParams->ldmParams.hashLog; 1050 break; 1051 case ZSTD_c_ldmMinMatch : 1052 *value = (int)CCtxParams->ldmParams.minMatchLength; 1053 break; 1054 case ZSTD_c_ldmBucketSizeLog : 1055 *value = (int)CCtxParams->ldmParams.bucketSizeLog; 1056 break; 1057 case ZSTD_c_ldmHashRateLog : 1058 *value = (int)CCtxParams->ldmParams.hashRateLog; 1059 break; 1060 case ZSTD_c_targetCBlockSize : 1061 *value = (int)CCtxParams->targetCBlockSize; 1062 break; 1063 case ZSTD_c_srcSizeHint : 1064 *value = (int)CCtxParams->srcSizeHint; 1065 break; 1066 case ZSTD_c_stableInBuffer : 1067 *value = (int)CCtxParams->inBufferMode; 1068 break; 1069 case ZSTD_c_stableOutBuffer : 1070 *value = (int)CCtxParams->outBufferMode; 1071 break; 1072 case ZSTD_c_blockDelimiters : 1073 *value = (int)CCtxParams->blockDelimiters; 1074 break; 1075 case ZSTD_c_validateSequences : 1076 *value = (int)CCtxParams->validateSequences; 1077 break; 1078 case ZSTD_c_splitAfterSequences : 1079 *value = (int)CCtxParams->postBlockSplitter; 1080 break; 1081 case ZSTD_c_blockSplitterLevel : 1082 *value = CCtxParams->preBlockSplitter_level; 1083 break; 1084 case ZSTD_c_useRowMatchFinder : 1085 *value = (int)CCtxParams->useRowMatchFinder; 1086 break; 1087 case ZSTD_c_deterministicRefPrefix: 1088 *value = (int)CCtxParams->deterministicRefPrefix; 1089 break; 1090 case ZSTD_c_prefetchCDictTables: 1091 *value = (int)CCtxParams->prefetchCDictTables; 1092 break; 1093 case ZSTD_c_enableSeqProducerFallback: 1094 *value = CCtxParams->enableMatchFinderFallback; 1095 break; 1096 case ZSTD_c_maxBlockSize: 1097 *value = (int)CCtxParams->maxBlockSize; 1098 break; 1099 case ZSTD_c_repcodeResolution: 1100 *value = (int)CCtxParams->searchForExternalRepcodes; 1101 break; 1102 default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); 1103 } 1104 return 0; 1105 } 1106 1107 /* ZSTD_CCtx_setParametersUsingCCtxParams() : 1108 * just applies `params` into `cctx` 1109 * no action is performed, parameters are merely stored. 1110 * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. 1111 * This is possible even if a compression is ongoing. 1112 * In which case, new parameters will be applied on the fly, starting with next compression job. 
/* ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  just applies `params` into `cctx`
 *  no action is performed, parameters are merely stored.
 *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
 *    This is possible even if a compression is ongoing.
 *    In which case, new parameters will be applied on the fly, starting with next compression job.
 */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "The context is in the wrong stage!");
    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                    "Can't override parameters with cdict attached (some must "
                    "be inherited from the cdict).");

    cctx->requestedParams = *params;
    return 0;
}

size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
{
    ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */);
    DEBUGLOG(4, "ZSTD_CCtx_setCParams");
    /* only update if all parameters are valid */
    FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), "");
    return 0;
}

size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams)
{
    ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */);
    DEBUGLOG(4, "ZSTD_CCtx_setFParams");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), "");
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), "");
    return 0;
}

size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParams");
    /* First check cParams, because we want to update all or none. */
    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
    /* Next set fParams, because this could fail if the cctx isn't in init stage. */
    FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), "");
    /* Finally set cParams, which should succeed. */
    FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), "");
    return 0;
}

size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't set pledgedSrcSize when not in init stage.");
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}

static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
        int const compressionLevel,
        size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
        const ZSTD_compressionParameters* cParams);
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams);
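/* Illustrative streaming setup (not part of this file): declaring the total
 * input size up front lets the frame header carry the content size.
 *
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *     ZSTD_CCtx_setPledgedSrcSize(cctx, totalSrcSize);
 *     // ... then stream the input with ZSTD_compressStream2() ...
 */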
/*
 * Initializes the local dictionary using requested parameters.
 * NOTE: Initialization does not employ the pledged src size,
 * because the dictionary may be used for multiple compressions.
 */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        /* Local dictionary already initialized. */
        assert(cctx->cdict == dl->cdict);
        return 0;
    }
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    dl->cdict = ZSTD_createCDict_advanced2(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            &cctx->requestedParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
    cctx->cdict = dl->cdict;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx,
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType)
{
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't load a dictionary when cctx is not in init stage.");
    ZSTD_clearAllDicts(cctx);  /* erase any previously set dictionary */
    if (dict == NULL || dictSize == 0)  /* no dictionary */
        return 0;
    if (dictLoadMethod == ZSTD_dlm_byRef) {
        cctx->localDict.dict = dict;
    } else {
        /* copy dictionary content inside CCtx to own its lifetime */
        void* dictBuffer;
        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                        "static CCtx can't allocate for an internal copy of dictionary");
        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
        RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation,
                        "allocation failed for dictionary content");
        ZSTD_memcpy(dictBuffer, dict, dictSize);
        cctx->localDict.dictBuffer = dictBuffer;   /* owned ptr to free */
        cctx->localDict.dict = dictBuffer;         /* read-only reference */
    }
    cctx->localDict.dictSize = dictSize;
    cctx->localDict.dictContentType = dictContentType;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_byReference(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}

size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
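/* Illustrative dictionary flow (not part of this file): load once during init,
 * then every subsequent frame compressed with this cctx uses the dictionary.
 *
 *     ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictBufferSize);  // copies dict
 *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 */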
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a dict when ctx not in init stage.");
    /* Free the existing local cdict (if any) to save memory. */
    ZSTD_clearAllDicts(cctx);
    cctx->cdict = cdict;
    return 0;
}

size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a pool when ctx not in init stage.");
    cctx->pool = pool;
    return 0;
}

size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}

size_t ZSTD_CCtx_refPrefix_advanced(
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a prefix when ctx not in init stage.");
    ZSTD_clearAllDicts(cctx);
    if (prefix != NULL && prefixSize > 0) {
        cctx->prefixDict.dict = prefix;
        cctx->prefixDict.dictSize = prefixSize;
        cctx->prefixDict.dictContentType = dictContentType;
    }
    return 0;
}

/*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
    if ( (reset == ZSTD_reset_session_only)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        cctx->streamStage = zcss_init;
        cctx->pledgedSrcSizePlusOne = 0;
    }
    if ( (reset == ZSTD_reset_parameters)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                        "Reset parameters is only possible during init stage.");
        ZSTD_clearAllDicts(cctx);
        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
    }
    return 0;
}
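/* Illustrative reuse pattern (not part of this file): keep parameters and
 * dictionary across frames, resetting only the session between them.
 *
 *     for (i = 0; i < nbFrames; i++) {
 *         ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *         ZSTD_compress2(cctx, dst[i], dstCapacity[i], src[i], srcSize[i]);
 *         // check each return value with ZSTD_isError()
 *     }
 */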
/* ZSTD_checkCParams() :
    control CParam values remain within authorized range.
    @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    BOUNDCHECK(ZSTD_c_strategy,  (int)cParams.strategy);
    return 0;
}

/* ZSTD_clampCParams() :
 *  make CParam values within valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
#   define CLAMP_TYPE(cParam, val, type)                                      \
        do {                                                                  \
            ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
            if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
            else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
        } while (0)
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}

/* ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
    return hashLog - btScale;
}

/* ZSTD_dictAndWindowLog() :
 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
 * is within the window. So the hashLog and chainLog should be large enough to reference both
 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
 * the hashLog and windowLog.
 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
 */
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
    /* No dictionary ==> No change */
    if (dictSize == 0) {
        return windowLog;
    }
    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN);   /* Handled in ZSTD_adjustCParams_internal() */
    {
        U64 const windowSize = 1ULL << windowLog;
        U64 const dictAndWindowSize = dictSize + windowSize;
        /* If the window size is already large enough to fit both the source and the dictionary
         * then just use the window size. Otherwise adjust so that it fits the dictionary and
         * the window.
         */
        if (windowSize >= dictSize + srcSize) {
            return windowLog;   /* Window size large enough already */
        } else if (dictAndWindowSize >= maxWindowSize) {
            return ZSTD_WINDOWLOG_MAX;   /* Larger than max window log */
        } else {
            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
        }
    }
}
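/* Worked example (illustrative): windowLog=20 (1 MiB window), dictSize=512 KiB,
 * srcSize=768 KiB. The window (1 MiB) cannot fit dict+src (1.25 MiB), and
 * dictAndWindowSize = 1.5 MiB is below the max window, so the result is
 * ZSTD_highbit32(1.5 MiB - 1) + 1 = 21, i.e. a 2 MiB effective window.
 */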
/* ZSTD_adjustCParams_internal() :
 *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 *  mostly downsize to reduce memory consumption and initialization latency.
 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`.
 *  note : `srcSize==0` means 0!
 *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()).
 */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize,
                            ZSTD_CParamMode_e mode,
                            ZSTD_ParamSwitch_e useRowMatchFinder)
{
    const U64 minSrcSize = 513; /* (1<<9) + 1 */
    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    /* Cascade the selected strategy down to the next-highest one built into
     * this binary. */
#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_btultra2) {
        cPar.strategy = ZSTD_btultra;
    }
    if (cPar.strategy == ZSTD_btultra) {
        cPar.strategy = ZSTD_btopt;
    }
#endif
#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_btopt) {
        cPar.strategy = ZSTD_btlazy2;
    }
#endif
#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_btlazy2) {
        cPar.strategy = ZSTD_lazy2;
    }
#endif
#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_lazy2) {
        cPar.strategy = ZSTD_lazy;
    }
#endif
#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_lazy) {
        cPar.strategy = ZSTD_greedy;
    }
#endif
#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_greedy) {
        cPar.strategy = ZSTD_dfast;
    }
#endif
#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
    if (cPar.strategy == ZSTD_dfast) {
        cPar.strategy = ZSTD_fast;
        cPar.targetLength = 0;
    }
#endif

    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
        /* If we don't know the source size, don't make any
         * assumptions about it. We will already have selected
         * smaller parameters if a dictionary is in use.
         */
        break;
    case ZSTD_cpm_createCDict:
        /* Assume a small source size when creating a dictionary
         * with an unknown source size.
         */
        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            srcSize = minSrcSize;
        break;
    case ZSTD_cpm_attachDict:
        /* Dictionary has its own dedicated parameters which have
         * already been selected. We are selecting parameters
         * for only the source.
         */
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize <= maxWindowResize)
      && (dictSize <= maxWindowResize) ) {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                           ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
        if (cycleLog > dictAndWindowLog)
            cPar.chainLog -= (cycleLog - dictAndWindowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */

    /* We can't use more than 32 bits of hash in total, so that means that we require:
     * (hashLog + 8) <= 32 && (chainLog + 8) <= 32
     */
    if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) {
        U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS;
        if (cPar.hashLog > maxShortCacheHashLog) {
            cPar.hashLog = maxShortCacheHashLog;
        }
        if (cPar.chainLog > maxShortCacheHashLog) {
            cPar.chainLog = maxShortCacheHashLog;
        }
    }


    /* At this point, we aren't 100% sure if we are using the row match finder.
     * Unless it is explicitly disabled, conservatively assume that it is enabled.
     * In this case it will only be disabled for small sources, so shrinking the
     * hash log a little bit shouldn't result in any ratio loss.
     */
    if (useRowMatchFinder == ZSTD_ps_auto)
        useRowMatchFinder = ZSTD_ps_enable;

    /* We can't hash more than 32-bits in total. So that means that we require:
     * (hashLog - rowLog + 8) <= 32
     */
    if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) {
        /* Switch to 32-entry rows if searchLog is 5 (or more) */
        U32 const rowLog = BOUNDED(4, cPar.searchLog, 6);
        U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS;
        U32 const maxHashLog = maxRowHashLog + rowLog;
        assert(cPar.hashLog >= rowLog);
        if (cPar.hashLog > maxHashLog) {
            cPar.hashLog = maxHashLog;
        }
    }

    return cPar;
}

ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
                   unsigned long long srcSize,
                   size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);
}
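/* Illustrative use of the public entry point (not part of this file): shrink
 * level-19 parameters for a known 4 KiB input with no dictionary.
 *
 *     ZSTD_compressionParameters cPar = ZSTD_getCParams(19, 0, 0);
 *     cPar = ZSTD_adjustCParams(cPar, 4096, 0);
 *     // windowLog is now bounded by the input size rather than the level default
 */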

static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);

static void ZSTD_overrideCParams(
              ZSTD_compressionParameters* cParams,
        const ZSTD_compressionParameters* overrides)
{
    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
}

ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
    ZSTD_compressionParameters cParams;
    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
        assert(CCtxParams->srcSizeHint>=0);
        srcSizeHint = (U64)CCtxParams->srcSizeHint;
    }
    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
    if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
    assert(!ZSTD_checkCParams(cParams));
    /* srcSizeHint == 0 means 0 */
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
}
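
/* Usage sketch (illustrative): individual cParams set through the advanced
 * API act as overrides here; a field left at 0 means "keep the value derived
 * from the compression level". Assumes only the public API from zstd.h.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 25);
 *     // windowLog is now forced to 25; hashLog, chainLog, etc. still come
 *     // from the level-3 defaults, then get adjusted as above.
 */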

static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const ZSTD_ParamSwitch_e useRowMatchFinder,
                       const int enableDedicatedDictSearch,
                       const U32 forCCtx)
{
    /* chain table size should be 0 for fast or row-hash strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
                                ? ((size_t)1 << cParams->chainLog)
                                : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
     * surrounded by redzones in ASAN. */
    size_t const tableSpace = chainSize * sizeof(U32)
                            + hSize * sizeof(U32)
                            + h3Size * sizeof(U32);
    size_t const optPotentialSpace =
        ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32))
      + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32))
      + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32))
      + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32))
      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
    size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
                                            ? ZSTD_cwksp_aligned64_alloc_size(hSize)
                                            : 0;
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                ? optPotentialSpace
                                : 0;
    size_t const slackSpace = ZSTD_cwksp_slack_space_required();

    /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
    ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
    assert(useRowMatchFinder != ZSTD_ps_auto);

    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
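
/* Worked example (illustrative): for hashLog = chainLog = 17 with a
 * chain-based strategy, tableSpace above is
 *     (1<<17)*4 + (1<<17)*4 = 512 KB + 512 KB = 1 MB,
 * plus another 512 KB of hashTable3 when minMatch==3 allows hashLog3 = 17.
 * Row-based strategies drop the chain table but add hSize bytes of tag space.
 */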

/* Helper function for calculating memory requirements.
 * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
    U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
    return blockSize / divider;
}

static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        const ZSTD_compressionParameters* cParams,
        const ldmParams_t* ldmParams,
        const int isStatic,
        const ZSTD_ParamSwitch_e useRowMatchFinder,
        const size_t buffInSize,
        const size_t buffOutSize,
        const U64 pledgedSrcSize,
        int useSequenceProducer,
        size_t maxBlockSize)
{
    size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
    size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
    size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
                            + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))
                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
    size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);
    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);

    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
    size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
        ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;


    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
                             + ZSTD_cwksp_alloc_size(buffOutSize);

    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;

    size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
    size_t const externalSeqSpace = useSequenceProducer
        ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
        : 0;

    size_t const neededSpace =
        cctxSpace +
        tmpWorkSpace +
        blockStateSpace +
        ldmSpace +
        ldmSeqSpace +
        matchStateSize +
        tokenSpace +
        bufferSpace +
        externalSeqSpace;

    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
    return neededSpace;
}

size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
                                                                               &cParams);

    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    /* estimateCCtxSize is for one-shot compression. So no buffers should
     * be needed. However, we still allocate two 0-sized buffers, which can
     * take space under ASAN. */
    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}

size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
    int tier = 0;
    size_t largestSize = 0;
    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
    for (; tier < 4; ++tier) {
        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
    }
    return largestSize;
}

size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        /* Ensure monotonically increasing memory usage as compression level increases */
        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}
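
/* Usage sketch (illustrative): pairing the estimate with a static context.
 * ZSTD_initStaticCCtx() is the intended consumer of this bound; `wksp` is a
 * hypothetical caller-owned buffer.
 *
 *     size_t const need = ZSTD_estimateCCtxSize(3);
 *     void* const wksp = malloc(need);            // or a static arena
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, need);
 *     // cctx is expected to be non-NULL for any single-threaded, one-shot
 *     // level-3 compression, whatever the source size.
 */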

size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);
        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
                ? ((size_t)1 << cParams.windowLog) + blockSize
                : 0;
        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);

        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
            &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
            ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
    }
}

size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    return ZSTD_estimateCStreamSize_usingCParams(cParams);
}

size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}
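
/* Worked example (illustrative): the streaming estimate exceeds the one-shot
 * estimate mainly by the two internal buffers sized above. For windowLog=21
 * and the default 128 KB block size, approximately:
 *     inBuffSize  = (1<<21) + 128 KB              = 2 MB + 128 KB
 *     outBuffSize = ZSTD_compressBound(128 KB)+1  ~ 128.5 KB
 * so ZSTD_estimateCStreamSize(level) ~ ZSTD_estimateCCtxSize(level) + ~2.3 MB.
 */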

/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
    {   ZSTD_frameProgression fp;
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        fp.currentJobID = 0;
        fp.nbActiveWorkers = 0;
        return fp;
}   }
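
/* Usage sketch (illustrative): polling progression from a monitoring thread
 * while a long streaming compression runs elsewhere. Assumes only the
 * advanced public API.
 *
 *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
 *     printf("ingested %llu B, produced %llu B\n",
 *            (unsigned long long)fp.ingested,
 *            (unsigned long long)fp.produced);
 *     // In single-threaded mode, flushed == produced and nbActiveWorkers == 0,
 *     // per the simplifications above.
 */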

/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
    (void)cctx;
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}

static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;
    (void)cParams2;
    assert(cParams1.windowLog == cParams2.windowLog);
    assert(cParams1.chainLog  == cParams2.chainLog);
    assert(cParams1.hashLog   == cParams2.hashLog);
    assert(cParams1.searchLog == cParams2.searchLog);
    assert(cParams1.minMatch  == cParams2.minMatch);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy  == cParams2.strategy);
}

void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
    int i;
    for (i = 0; i < ZSTD_REP_NUM; ++i)
        bs->rep[i] = repStartValue[i];
    bs->entropy.huf.repeatMode = HUF_repeat_none;
    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}

/*! ZSTD_invalidateMatchState()
 *  Invalidate all the matches in the match finder tables.
 *  Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    ms->nextToUpdate = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;
}

/*
 * Controls, for this matchState reset, whether the tables need to be cleared /
 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
 * subsequent operation will overwrite the table space anyways (e.g., copying
 * the matchState contents in from a CDict).
 */
typedef enum {
    ZSTDcrp_makeClean,
    ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;

/*
 * Controls, for this matchState reset, whether indexing can continue where it
 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
 * (ZSTDirp_reset).
 */
typedef enum {
    ZSTDirp_continue,
    ZSTDirp_reset
} ZSTD_indexResetPolicy_e;

typedef enum {
    ZSTD_resetTarget_CDict,
    ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;

/* Mixes bits in a 64-bit value, based on XXH3_rrmxmx */
static U64 ZSTD_bitmix(U64 val, U64 len) {
    val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24);
    val *= 0x9FB21C651E98DF25ULL;
    val ^= (val >> 35) + len;
    val *= 0x9FB21C651E98DF25ULL;
    return val ^ (val >> 28);
}

/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) {
    ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
}
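
/* Illustrative sketch (not part of the library): ZSTD_bitmix() is a cheap
 * finalizer-style mixer, so neighboring inputs should yield unrelated salts.
 * A standalone restatement with the rotates spelled out, for experimenting
 * outside this file (same constants as above):
 *
 *     static uint64_t bitmix(uint64_t v, uint64_t len) {
 *         v ^= ((v >> 49) | (v << 15)) ^ ((v >> 24) | (v << 40));
 *         v *= 0x9FB21C651E98DF25ULL;
 *         v ^= (v >> 35) + len;
 *         v *= 0x9FB21C651E98DF25ULL;
 *         return v ^ (v >> 28);
 *     }
 *     // Expectation: bitmix(1, 8) and bitmix(2, 8) differ in about half
 *     // of their 64 bits.
 */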

static size_t
ZSTD_reset_matchState(ZSTD_MatchState_t* ms,
                      ZSTD_cwksp* ws,
                const ZSTD_compressionParameters* cParams,
                const ZSTD_ParamSwitch_e useRowMatchFinder,
                const ZSTD_compResetPolicy_e crp,
                const ZSTD_indexResetPolicy_e forceResetIndex,
                const ZSTD_resetTarget_e forWho)
{
    /* disable chain table allocation for fast or row-based strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
                                                     ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
                                ? ((size_t)1 << cParams->chainLog)
                                : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;

    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
    assert(useRowMatchFinder != ZSTD_ps_auto);
    if (forceResetIndex == ZSTDirp_reset) {
        ZSTD_window_init(&ms->window);
        ZSTD_cwksp_mark_tables_dirty(ws);
    }

    ms->hashLog3 = hashLog3;
    ms->lazySkipping = 0;

    ZSTD_invalidateMatchState(ms);

    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */

    ZSTD_cwksp_clear_tables(ws);

    DEBUGLOG(5, "reserving table space");
    /* table Space */
    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");

    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
    if (crp!=ZSTDcrp_leaveDirty) {
        /* reset tables only */
        ZSTD_cwksp_clean_tables(ws);
    }

    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
        /* Row match finder needs an additional table of hashes ("tags") */
        size_t const tagTableSize = hSize;
        /* We want to generate a new salt in case we reset a Cctx, but we always want to use
         * 0 when we reset a Cdict */
        if (forWho == ZSTD_resetTarget_CCtx) {
            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
            ZSTD_advanceHashSalt(ms);
        } else {
            /* When we are not salting we want to always memset the memory */
            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize);
            ZSTD_memset(ms->tagTable, 0, tagTableSize);
            ms->hashSalt = 0;
        }
        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
            assert(cParams->hashLog >= rowLog);
            ms->rowHashLog = cParams->hashLog - rowLog;
        }
    }

    /* opt parser space */
    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned));
        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned));
        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned));
        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned));
        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
    }

    ms->cParams = *cParams;

    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");
    return 0;
}
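
/* Worked example (illustrative): with hashLog=17 and searchLog=5 above,
 *     rowLog     = BOUNDED(4, 5, 6) = 5   -> 32 entries per row
 *     rowHashLog = 17 - 5 = 12            -> 4096 rows
 * and the tag table adds hSize = 128 KB of 1-byte tags on top of the
 * 512 KB hash table proper.
 */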

/* ZSTD_indexTooCloseToMax() :
 * minor optimization : prefer memset() rather than reduceIndex()
 * which is measurably slow in some circumstances (reported for Visual Studio).
 * Works when re-using a context for a lot of smallish inputs :
 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
 * memset() will be triggered before reduceIndex().
 */
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}

/* ZSTD_dictTooBig():
 * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
 * one go generically. So we ensure that in that case we reset the tables to zero,
 * so that we can load as much of the dictionary as possible.
 */
static int ZSTD_dictTooBig(size_t const loadedDictSize)
{
    return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
}

/*! ZSTD_resetCCtx_internal() :
 * @param loadedDictSize The size of the dictionary to be loaded
 * into the context, if any. If no dictionary is used, or the
 * dictionary is being attached / copied, then pass 0.
 * note : `params` are assumed fully validated at this stage.
 */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params const* params,
                                      U64 const pledgedSrcSize,
                                      size_t const loadedDictSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    ZSTD_cwksp* const ws = &zc->workspace;
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
                (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter);
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));

    zc->isFirstBlock = 1;

    /* Set applied params early so we can modify them for LDM,
     * and point params at the applied params.
     */
    zc->appliedParams = *params;
    params = &zc->appliedParams;

    assert(params->useRowMatchFinder != ZSTD_ps_auto);
    assert(params->postBlockSplitter != ZSTD_ps_auto);
    assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
    assert(params->maxBlockSize != 0);
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
        assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
        assert(params->ldmParams.hashRateLog < 32);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(params->maxBlockSize, windowSize);
        size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
                ? windowSize + blockSize
                : 0;
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);

        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
        int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
        ZSTD_indexResetPolicy_e needsIndexReset =
            (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;

        size_t const neededSpace =
            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
                &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
                buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);

        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");

        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);

        {   /* Check if workspace is large enough, alloc a new one if needed */
            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
            int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (resizeWorkspace) {
                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
                            ZSTD_cwksp_sizeof(ws) >> 10,
                            neededSpace >> 10);

                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");

                needsIndexReset = ZSTDirp_reset;

                ZSTD_cwksp_free(ws, zc->customMem);
                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");

                DEBUGLOG(5, "reserving object space");
                /* Statically sized space.
                 * tmpWorkspace never moves,
                 * though prev/next block swap places */
                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
                zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE);
                RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace");
                zc->tmpWkspSize = TMP_WORKSPACE_SIZE;
        }   }

        ZSTD_cwksp_clear(ws);

        /* init params */
        zc->blockState.matchState.cParams = params->cParams;
        zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSizeMax = blockSize;

        xxh64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;
        zc->dictContentSize = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        FORWARD_IF_ERROR(ZSTD_reset_matchState(
                &zc->blockState.matchState,
                ws,
                &params->cParams,
                params->useRowMatchFinder,
                crp,
                needsIndexReset,
                ZSTD_resetTarget_CCtx), "");

        zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef));

        /* ldm hash table */
        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
            /* TODO: avoid memset? */
            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t));
            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq));
            zc->maxNbLdmSequences = maxNbLdmSeq;

            ZSTD_window_init(&zc->ldmState.window);
            zc->ldmState.loadedDictEnd = 0;
        }

        /* reserve space for block-level external sequences */
        if (ZSTD_hasExtSeqProd(params)) {
            size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
            zc->extSeqBufCapacity = maxNbExternalSeq;
            zc->extSeqBuf =
                (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
        }

        /* buffers */

        /* ZSTD_wildcopy() is used to copy into the literals buffer,
         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
         */
2172 */ 2173 zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); 2174 zc->seqStore.maxNbLit = blockSize; 2175 2176 zc->bufferedPolicy = zbuff; 2177 zc->inBuffSize = buffInSize; 2178 zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); 2179 zc->outBuffSize = buffOutSize; 2180 zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); 2181 2182 /* ldm bucketOffsets table */ 2183 if (params->ldmParams.enableLdm == ZSTD_ps_enable) { 2184 /* TODO: avoid memset? */ 2185 size_t const numBuckets = 2186 ((size_t)1) << (params->ldmParams.hashLog - 2187 params->ldmParams.bucketSizeLog); 2188 zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); 2189 ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets); 2190 } 2191 2192 /* sequences storage */ 2193 ZSTD_referenceExternalSequences(zc, NULL, 0); 2194 zc->seqStore.maxNbSeq = maxNbSeq; 2195 zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); 2196 zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); 2197 zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); 2198 2199 DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); 2200 assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace)); 2201 2202 zc->initialized = 1; 2203 2204 return 0; 2205 } 2206 } 2207 2208 /* ZSTD_invalidateRepCodes() : 2209 * ensures next compression will not use repcodes from previous block. 2210 * Note : only works with regular variant; 2211 * do not use with extDict variant ! */ 2212 void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { 2213 int i; 2214 for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0; 2215 assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); 2216 } 2217 2218 /* These are the approximate sizes for each strategy past which copying the 2219 * dictionary tables into the working context is faster than using them 2220 * in-place. 

/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};

static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
                                 const ZSTD_CCtx_params* params,
                                 U64 pledgedSrcSize)
{
    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
    return dedicatedDictSearch
        || ( ( pledgedSrcSize <= cutoff
            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
            || params->attachDictPref == ZSTD_dictForceAttach )
          && params->attachDictPref != ZSTD_dictForceCopy
          && !params->forceWindow ); /* dictMatchState isn't correctly
                                      * handled in _enforceMaxDist */
}
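
/* Usage sketch (illustrative): the attach/copy decision can be steered from
 * the public API when the cutoff heuristic above is not what you want.
 *
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);
 *     ZSTD_CCtx_refCDict(cctx, cdict);
 *     // Copying pays a fixed startup cost but searches faster per byte,
 *     // which tends to win once the input is large relative to the cutoffs.
 */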

static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
                        const ZSTD_CDict* cdict,
                        ZSTD_CCtx_params params,
                        U64 pledgedSrcSize,
                        ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
                (unsigned long long)pledgedSrcSize);
    {
        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        /* pledgedSrcSize == 0 means 0! */

        if (cdict->matchState.dedicatedDictSearch) {
            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
        }

        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
                                                     cdict->dictContentSize, ZSTD_cpm_attachDict,
                                                     params.useRowMatchFinder);
        params.cParams.windowLog = windowLog;
        params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
                                                 /* loadedDictSize */ 0,
                                                 ZSTDcrp_makeClean, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
    }

    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            /* loadedDictEnd is expressed within the referential of the active context */
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
    }   }

    cctx->dictID = cdict->dictID;
    cctx->dictContentSize = cdict->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize,
                                        ZSTD_compressionParameters const* cParams) {
    if (ZSTD_CDictIndicesAreTagged(cParams)) {
        /* Remove tags from the CDict table if they are present.
         * See docs on "short cache" in zstd_compress_internal.h for context. */
        size_t i;
        for (i = 0; i < tableSize; i++) {
            U32 const taggedIndex = src[i];
            U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS;
            dst[i] = index;
        }
    } else {
        ZSTD_memcpy(dst, src, tableSize * sizeof(U32));
    }
}
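
/* Worked example (illustrative): assuming ZSTD_SHORT_CACHE_TAG_BITS == 8
 * (consistent with the "(hashLog + 8) <= 32" constraint earlier in this
 * file), a tagged entry packs (index << 8) | tag, so:
 *     taggedIndex = 0x00012345  ->  index = 0x0123, tag = 0x45
 * Stripping the tag is exactly the right shift above; tags are only
 * meaningful while the table lives inside the CDict.
 */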

static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    assert(!cdict->matchState.dedicatedDictSearch);
    DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
                (unsigned long long)pledgedSrcSize);

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        params.useRowMatchFinder = cdict->useRowMatchFinder;
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
                                                 /* loadedDictSize */ 0,
                                                 ZSTDcrp_leaveDirty, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
    assert(params.useRowMatchFinder != ZSTD_ps_auto);

    /* copy tables */
    {   size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
                                    ? ((size_t)1 << cdict_cParams->chainLog)
                                    : 0;
        size_t const hSize = (size_t)1 << cdict_cParams->hashLog;

        ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable,
                                    cdict->matchState.hashTable,
                                    hSize, cdict_cParams);

        /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
        if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
            ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable,
                                        cdict->matchState.chainTable,
                                        chainSize, cdict_cParams);
        }
        /* copy tag table */
        if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
            size_t const tagTableSize = hSize;
            ZSTD_memcpy(cctx->blockState.matchState.tagTable,
                        cdict->matchState.tagTable,
                        tagTableSize);
            cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;
        }
    }

    /* Zero the hashTable3, since the cdict never fills it */
    assert(cctx->blockState.matchState.hashLog3 <= 31);
    {   U32 const h3log = cctx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
        assert(cdict->matchState.hashLog3 == 0);
        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);

    /* copy dictionary offsets */
    {   ZSTD_MatchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;
    cctx->dictContentSize = cdict->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

/* We have a choice between copying the dictionary context into the working
 * context, or referencing the dictionary context from the working context
 * in-place. We decide here which strategy to use. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            const ZSTD_CCtx_params* params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{

    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
                (unsigned)pledgedSrcSize);

    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
        return ZSTD_resetCCtx_byAttachingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    } else {
        return ZSTD_resetCCtx_byCopyingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    }
}

/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
        params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
        params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;
        params.ldmParams = srcCCtx->appliedParams.ldmParams;
        params.fParams = fParams;
        params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
        ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
                                /* loadedDictSize */ 0,
                                ZSTDcrp_leaveDirty, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
                                                         srcCCtx->appliedParams.useRowMatchFinder,
                                                         0 /* forDDSDict */)
                                    ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
                                    : 0;
        size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        U32 const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
                    srcCCtx->blockState.matchState.hashTable,
                    hSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
                    srcCCtx->blockState.matchState.chainTable,
                    chainSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
                    srcCCtx->blockState.matchState.hashTable3,
                    h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {
        const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;
    dstCCtx->dictContentSize = srcCCtx->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}

/*! ZSTD_copyCCtx() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
 * @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                  fParams, pledgedSrcSize,
                                  zbuff);
}


#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
    U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));                 /* can be cast to int */


    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            U32 newVal;
            if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
                /* This write is pointless, but is required(?) for the compiler
                 * to auto-vectorize the loop. */
                newVal = ZSTD_DUBT_UNSORTED_MARK;
            } else if (table[cellNb] < reducerThreshold) {
                newVal = 0;
            } else {
                newVal = table[cellNb] - reducerValue;
            }
            table[cellNb] = newVal;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
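
/* Worked example (illustrative): with reducerValue = 0x10000000 and
 * ZSTD_WINDOW_START_INDEX assumed to be 2 (so reducerThreshold = 0x10000002):
 *     table entry 0x1800F000 -> 0x0800F000   (recent index, shifted down)
 *     table entry 0x0FFFFFFF -> 0            (older than the kept window)
 * Relative distances between surviving indices are preserved, which is all
 * the match finders rely on.
 */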

/*! ZSTD_reduceIndex() :
 *   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
        U32 const chainSize = (U32)1 << params->cParams.chainLog;
        if (params->cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}


/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr)
{
    const SeqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    int longOffsets = 0;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const ofCode = ZSTD_highbit32(sequences[u].offBase);
        U32 const mlv = sequences[u].mlBase;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ofCode;
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
        assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN));
        if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN)
            longOffsets = 1;
    }
    if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
    return longOffsets;
}
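
/* Worked example (illustrative): the offset code is just the position of the
 * highest set bit of offBase:
 *     offBase = 1024 (0x400)  ->  ofCode = ZSTD_highbit32(1024) = 10
 * and the decoder later reads ofCode extra bits to rebuild the offset.
 * On 32-bit builds, ofCode >= STREAM_ACCUMULATOR_MIN (assumed 25 there)
 * switches the block into longOffsets mode, so the bitstream writer splits
 * the extra bits across two reads.
 */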

/* ZSTD_useTargetCBlockSize():
 * Checks whether the target compressed block size parameter is in use.
 * If it is, compression makes a best effort to produce compressed blocks of around targetCBlockSize.
 * Returns 1 if in use, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
    return (cctxParams->targetCBlockSize != 0);
}

/* ZSTD_blockSplitterEnabled():
 * Checks whether the block splitting parameter is in use.
 * If it is, compression makes a best effort to split blocks in order to improve the compression ratio.
 * At the time this function is called, the parameter must be finalized.
 * Returns 1 if enabled, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter);
    assert(cctxParams->postBlockSplitter != ZSTD_ps_auto);
    return (cctxParams->postBlockSplitter == ZSTD_ps_enable);
}

/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
 * and size of the sequences statistics
 */
typedef struct {
    U32 LLtype;
    U32 Offtype;
    U32 MLtype;
    size_t size;
    size_t lastCountSize;    /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
    int longOffsets;
} ZSTD_symbolEncodingTypeStats_t;

/* ZSTD_buildSequencesStatistics():
 * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
 * Modifies `nextEntropy` to have the appropriate values as a side effect.
 * nbSeq must be greater than 0.
 *
 * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
 */
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(
        const SeqStore_t* seqStorePtr, size_t nbSeq,
        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
              BYTE* dst, const BYTE* const dstEnd,
              ZSTD_strategy strategy, unsigned* countWorkspace,
              void* entropyWorkspace, size_t entropyWkspSize)
{
    BYTE* const ostart = dst;
    const BYTE* const oend = dstEnd;
    BYTE* op = ostart;
    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    ZSTD_symbolEncodingTypeStats_t stats;

    stats.lastCountSize = 0;
    /* convert length/distances into codes */
    stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
    assert(op <= oend);
    assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
        stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype,
                countWorkspace, max, llCodeTable, nbSeq,
                LL_defaultNorm, LL_defaultNormLog, MaxLL,
                prevEntropy->litlengthCTable,
                sizeof(prevEntropy->litlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.LLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
        stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype,
                countWorkspace, max, ofCodeTable, nbSeq,
                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                prevEntropy->offcodeCTable,
                sizeof(prevEntropy->offcodeCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.Offtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
        stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype,
                countWorkspace, max, mlCodeTable, nbSeq,
                ML_defaultNorm, ML_defaultNormLog, MaxML,
                prevEntropy->matchlengthCTable,
                sizeof(prevEntropy->matchlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.MLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    stats.size = (size_t)(op-ostart);
    return stats;
}
2799 */ 2800 #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 2801 MEM_STATIC size_t 2802 ZSTD_entropyCompressSeqStore_internal( 2803 void* dst, size_t dstCapacity, 2804 const void* literals, size_t litSize, 2805 const SeqStore_t* seqStorePtr, 2806 const ZSTD_entropyCTables_t* prevEntropy, 2807 ZSTD_entropyCTables_t* nextEntropy, 2808 const ZSTD_CCtx_params* cctxParams, 2809 void* entropyWorkspace, size_t entropyWkspSize, 2810 const int bmi2) 2811 { 2812 ZSTD_strategy const strategy = cctxParams->cParams.strategy; 2813 unsigned* count = (unsigned*)entropyWorkspace; 2814 FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; 2815 FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; 2816 FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; 2817 const SeqDef* const sequences = seqStorePtr->sequencesStart; 2818 const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); 2819 const BYTE* const ofCodeTable = seqStorePtr->ofCode; 2820 const BYTE* const llCodeTable = seqStorePtr->llCode; 2821 const BYTE* const mlCodeTable = seqStorePtr->mlCode; 2822 BYTE* const ostart = (BYTE*)dst; 2823 BYTE* const oend = ostart + dstCapacity; 2824 BYTE* op = ostart; 2825 size_t lastCountSize; 2826 int longOffsets = 0; 2827 2828 entropyWorkspace = count + (MaxSeq + 1); 2829 entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); 2830 2831 DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity); 2832 ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); 2833 assert(entropyWkspSize >= HUF_WORKSPACE_SIZE); 2834 2835 /* Compress literals */ 2836 { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); 2837 /* Base suspicion of uncompressibility on ratio of literals to sequences */ 2838 int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); 2839 2840 size_t const cSize = ZSTD_compressLiterals( 2841 op, dstCapacity, 2842 literals, litSize, 2843 entropyWorkspace, entropyWkspSize, 2844 &prevEntropy->huf, &nextEntropy->huf, 2845 cctxParams->cParams.strategy, 2846 ZSTD_literalsCompressionIsDisabled(cctxParams), 2847 suspectUncompressible, bmi2); 2848 FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); 2849 assert(cSize <= dstCapacity); 2850 op += cSize; 2851 } 2852 2853 /* Sequences Header */ 2854 RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, 2855 dstSize_tooSmall, "Can't fit seq hdr in output buf!"); 2856 if (nbSeq < 128) { 2857 *op++ = (BYTE)nbSeq; 2858 } else if (nbSeq < LONGNBSEQ) { 2859 op[0] = (BYTE)((nbSeq>>8) + 0x80); 2860 op[1] = (BYTE)nbSeq; 2861 op+=2; 2862 } else { 2863 op[0]=0xFF; 2864 MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); 2865 op+=3; 2866 } 2867 assert(op <= oend); 2868 if (nbSeq==0) { 2869 /* Copy the old tables over as if we repeated them */ 2870 ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); 2871 return (size_t)(op - ostart); 2872 } 2873 { BYTE* const seqHead = op++; 2874 /* build stats for sequences */ 2875 const ZSTD_symbolEncodingTypeStats_t stats = 2876 ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, 2877 &prevEntropy->fse, &nextEntropy->fse, 2878 op, oend, 2879 strategy, count, 2880 entropyWorkspace, entropyWkspSize); 2881 FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); 2882 *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); 2883 lastCountSize = stats.lastCountSize; 
2884 op += stats.size;
2885 longOffsets = stats.longOffsets;
2886 }
2887
2888 { size_t const bitstreamSize = ZSTD_encodeSequences(
2889 op, (size_t)(oend - op),
2890 CTable_MatchLength, mlCodeTable,
2891 CTable_OffsetBits, ofCodeTable,
2892 CTable_LitLength, llCodeTable,
2893 sequences, nbSeq,
2894 longOffsets, bmi2);
2895 FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
2896 op += bitstreamSize;
2897 assert(op <= oend);
2898 /* zstd versions <= 1.3.4 mistakenly report corruption when
2899 * FSE_readNCount() receives a buffer < 4 bytes.
2900 * Fixed by https://github.com/facebook/zstd/pull/1146.
2901 * This can happen when the last set_compressed table present is 2
2902 * bytes and the bitstream is only one byte.
2903 * In this exceedingly rare case, we will simply emit an uncompressed
2904 * block, since it isn't worth optimizing.
2905 */
2906 if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
2907 /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
2908 assert(lastCountSize + bitstreamSize == 3);
2909 DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
2910 "emitting an uncompressed block.");
2911 return 0;
2912 }
2913 }
2914
2915 DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
2916 return (size_t)(op - ostart);
2917 }
2918
2919 static size_t
2920 ZSTD_entropyCompressSeqStore_wExtLitBuffer(
2921 void* dst, size_t dstCapacity,
2922 const void* literals, size_t litSize,
2923 size_t blockSize,
2924 const SeqStore_t* seqStorePtr,
2925 const ZSTD_entropyCTables_t* prevEntropy,
2926 ZSTD_entropyCTables_t* nextEntropy,
2927 const ZSTD_CCtx_params* cctxParams,
2928 void* entropyWorkspace, size_t entropyWkspSize,
2929 int bmi2)
2930 {
2931 size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
2932 dst, dstCapacity,
2933 literals, litSize,
2934 seqStorePtr, prevEntropy, nextEntropy, cctxParams,
2935 entropyWorkspace, entropyWkspSize, bmi2);
2936 if (cSize == 0) return 0;
2937 /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
2938 * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
2939 */
2940 if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) {
2941 DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal() => do not compress block", dstCapacity);
2942 return 0; /* block not compressed */
2943 }
2944 FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
2945
2946 /* Check compressibility */
2947 { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);
2948 if (cSize >= maxCSize) return 0; /* block not compressed */
2949 }
2950 DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
2951 /* libzstd decoders up to and including v1.5.4 are not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly.
2952 * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above.
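 * Worked example, assuming ZSTD_minGain(blockSize, strategy) == (blockSize >> 6) + 2
 * for the fast strategies : a full 128 KB block (131072 bytes) is only accepted as
 * compressed when cSize < 131072 - 2050 == 129022, which keeps cSize strictly
 * below ZSTD_BLOCKSIZE_MAX, matching the assert below.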
2953 */ 2954 assert(cSize < ZSTD_BLOCKSIZE_MAX); 2955 return cSize; 2956 } 2957 2958 static size_t 2959 ZSTD_entropyCompressSeqStore( 2960 const SeqStore_t* seqStorePtr, 2961 const ZSTD_entropyCTables_t* prevEntropy, 2962 ZSTD_entropyCTables_t* nextEntropy, 2963 const ZSTD_CCtx_params* cctxParams, 2964 void* dst, size_t dstCapacity, 2965 size_t srcSize, 2966 void* entropyWorkspace, size_t entropyWkspSize, 2967 int bmi2) 2968 { 2969 return ZSTD_entropyCompressSeqStore_wExtLitBuffer( 2970 dst, dstCapacity, 2971 seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart), 2972 srcSize, 2973 seqStorePtr, 2974 prevEntropy, nextEntropy, 2975 cctxParams, 2976 entropyWorkspace, entropyWkspSize, 2977 bmi2); 2978 } 2979 2980 /* ZSTD_selectBlockCompressor() : 2981 * Not static, but internal use only (used by long distance matcher) 2982 * assumption : strat is a valid strategy */ 2983 ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) 2984 { 2985 static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { 2986 { ZSTD_compressBlock_fast /* default for 0 */, 2987 ZSTD_compressBlock_fast, 2988 ZSTD_COMPRESSBLOCK_DOUBLEFAST, 2989 ZSTD_COMPRESSBLOCK_GREEDY, 2990 ZSTD_COMPRESSBLOCK_LAZY, 2991 ZSTD_COMPRESSBLOCK_LAZY2, 2992 ZSTD_COMPRESSBLOCK_BTLAZY2, 2993 ZSTD_COMPRESSBLOCK_BTOPT, 2994 ZSTD_COMPRESSBLOCK_BTULTRA, 2995 ZSTD_COMPRESSBLOCK_BTULTRA2 2996 }, 2997 { ZSTD_compressBlock_fast_extDict /* default for 0 */, 2998 ZSTD_compressBlock_fast_extDict, 2999 ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT, 3000 ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT, 3001 ZSTD_COMPRESSBLOCK_LAZY_EXTDICT, 3002 ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT, 3003 ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT, 3004 ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT, 3005 ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT, 3006 ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT 3007 }, 3008 { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, 3009 ZSTD_compressBlock_fast_dictMatchState, 3010 ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE, 3011 ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE, 3012 ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE, 3013 ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE, 3014 ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE, 3015 ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE, 3016 ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE, 3017 ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE 3018 }, 3019 { NULL /* default for 0 */, 3020 NULL, 3021 NULL, 3022 ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH, 3023 ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH, 3024 ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH, 3025 NULL, 3026 NULL, 3027 NULL, 3028 NULL } 3029 }; 3030 ZSTD_BlockCompressor_f selectedCompressor; 3031 ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); 3032 3033 assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); 3034 DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); 3035 if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { 3036 static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = { 3037 { 3038 ZSTD_COMPRESSBLOCK_GREEDY_ROW, 3039 ZSTD_COMPRESSBLOCK_LAZY_ROW, 3040 ZSTD_COMPRESSBLOCK_LAZY2_ROW 3041 }, 3042 { 3043 ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW, 3044 ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW, 3045 ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW 3046 }, 3047 { 3048 ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW, 3049 ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW, 3050 ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW 3051 }, 3052 { 3053 
ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW, 3054 ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW, 3055 ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW 3056 } 3057 }; 3058 DEBUGLOG(5, "Selecting a row-based matchfinder"); 3059 assert(useRowMatchFinder != ZSTD_ps_auto); 3060 selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; 3061 } else { 3062 selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; 3063 } 3064 assert(selectedCompressor != NULL); 3065 return selectedCompressor; 3066 } 3067 3068 static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, 3069 const BYTE* anchor, size_t lastLLSize) 3070 { 3071 ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); 3072 seqStorePtr->lit += lastLLSize; 3073 } 3074 3075 void ZSTD_resetSeqStore(SeqStore_t* ssPtr) 3076 { 3077 ssPtr->lit = ssPtr->litStart; 3078 ssPtr->sequences = ssPtr->sequencesStart; 3079 ssPtr->longLengthType = ZSTD_llt_none; 3080 } 3081 3082 /* ZSTD_postProcessSequenceProducerResult() : 3083 * Validates and post-processes sequences obtained through the external matchfinder API: 3084 * - Checks whether nbExternalSeqs represents an error condition. 3085 * - Appends a block delimiter to outSeqs if one is not already present. 3086 * See zstd.h for context regarding block delimiters. 3087 * Returns the number of sequences after post-processing, or an error code. */ 3088 static size_t ZSTD_postProcessSequenceProducerResult( 3089 ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize 3090 ) { 3091 RETURN_ERROR_IF( 3092 nbExternalSeqs > outSeqsCapacity, 3093 sequenceProducer_failed, 3094 "External sequence producer returned error code %lu", 3095 (unsigned long)nbExternalSeqs 3096 ); 3097 3098 RETURN_ERROR_IF( 3099 nbExternalSeqs == 0 && srcSize > 0, 3100 sequenceProducer_failed, 3101 "Got zero sequences from external sequence producer for a non-empty src buffer!" 3102 ); 3103 3104 if (srcSize == 0) { 3105 ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); 3106 return 1; 3107 } 3108 3109 { 3110 ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; 3111 3112 /* We can return early if lastSeq is already a block delimiter. */ 3113 if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { 3114 return nbExternalSeqs; 3115 } 3116 3117 /* This error condition is only possible if the external matchfinder 3118 * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ 3119 RETURN_ERROR_IF( 3120 nbExternalSeqs == outSeqsCapacity, 3121 sequenceProducer_failed, 3122 "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" 3123 ); 3124 3125 /* lastSeq is not a block delimiter, so we need to append one. */ 3126 ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); 3127 return nbExternalSeqs + 1; 3128 } 3129 } 3130 3131 /* ZSTD_fastSequenceLengthSum() : 3132 * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. 3133 * Similar to another function in zstd_compress.c (determine_blockSize), 3134 * except it doesn't check for a block delimiter to end summation. 3135 * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). 3136 * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
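 * (With the early exit removed, each iteration is just two independent integer
 * additions, which lets the compiler unroll and vectorize the loop.)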
*/
3137 static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) {
3138 size_t matchLenSum, litLenSum, i;
3139 matchLenSum = 0;
3140 litLenSum = 0;
3141 for (i = 0; i < seqBufSize; i++) {
3142 litLenSum += seqBuf[i].litLength;
3143 matchLenSum += seqBuf[i].matchLength;
3144 }
3145 return litLenSum + matchLenSum;
3146 }
3147
3148 /*
3149 * Validates sequences produced by a block compressor.
3150 */
3151 static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams)
3152 {
3153 #if DEBUGLEVEL >= 1
3154 const SeqDef* seq = seqStore->sequencesStart;
3155 const SeqDef* const seqEnd = seqStore->sequences;
3156 size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4;
3157 for (; seq < seqEnd; ++seq) {
3158 const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq);
3159 assert(seqLength.matchLength >= matchLenLowerBound);
3160 (void)seqLength;
3161 (void)matchLenLowerBound;
3162 }
3163 #else
3164 (void)seqStore;
3165 (void)cParams;
3166 #endif
3167 }
3168
3169 static size_t
3170 ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
3171 ZSTD_SequencePosition* seqPos,
3172 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
3173 const void* src, size_t blockSize,
3174 ZSTD_ParamSwitch_e externalRepSearch);
3175
3176 typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e;
3177
3178 static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
3179 {
3180 ZSTD_MatchState_t* const ms = &zc->blockState.matchState;
3181 DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
3182 assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
3183 /* Assert that we have correctly flushed the ctx params into the ms's copy */
3184 ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
3185 /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add an
3186 * additional 1.
We need to revisit and change this logic to be more consistent */ 3187 if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { 3188 if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { 3189 ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); 3190 } else { 3191 ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); 3192 } 3193 return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */ 3194 } 3195 ZSTD_resetSeqStore(&(zc->seqStore)); 3196 /* required for optimal parser to read stats from dictionary */ 3197 ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; 3198 /* tell the optimal parser how we expect to compress literals */ 3199 ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; 3200 /* a gap between an attached dict and the current window is not safe, 3201 * they must remain adjacent, 3202 * and when that stops being the case, the dict must be unset */ 3203 assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit); 3204 3205 /* limited update after a very long match */ 3206 { const BYTE* const base = ms->window.base; 3207 const BYTE* const istart = (const BYTE*)src; 3208 const U32 curr = (U32)(istart-base); 3209 if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ 3210 if (curr > ms->nextToUpdate + 384) 3211 ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384)); 3212 } 3213 3214 /* select and store sequences */ 3215 { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms); 3216 size_t lastLLSize; 3217 { int i; 3218 for (i = 0; i < ZSTD_REP_NUM; ++i) 3219 zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; 3220 } 3221 if (zc->externSeqStore.pos < zc->externSeqStore.size) { 3222 assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); 3223 3224 /* External matchfinder + LDM is technically possible, just not implemented yet. 3225 * We need to revisit soon and implement it. */ 3226 RETURN_ERROR_IF( 3227 ZSTD_hasExtSeqProd(&zc->appliedParams), 3228 parameter_combination_unsupported, 3229 "Long-distance matching with external sequence producer enabled is not currently supported." 3230 ); 3231 3232 /* Updates ldmSeqStore.pos */ 3233 lastLLSize = 3234 ZSTD_ldm_blockCompress(&zc->externSeqStore, 3235 ms, &zc->seqStore, 3236 zc->blockState.nextCBlock->rep, 3237 zc->appliedParams.useRowMatchFinder, 3238 src, srcSize); 3239 assert(zc->externSeqStore.pos <= zc->externSeqStore.size); 3240 } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { 3241 RawSeqStore_t ldmSeqStore = kNullRawSeqStore; 3242 3243 /* External matchfinder + LDM is technically possible, just not implemented yet. 3244 * We need to revisit soon and implement it. */ 3245 RETURN_ERROR_IF( 3246 ZSTD_hasExtSeqProd(&zc->appliedParams), 3247 parameter_combination_unsupported, 3248 "Long-distance matching with external sequence producer enabled is not currently supported." 
3249 ); 3250 3251 ldmSeqStore.seq = zc->ldmSequences; 3252 ldmSeqStore.capacity = zc->maxNbLdmSequences; 3253 /* Updates ldmSeqStore.size */ 3254 FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, 3255 &zc->appliedParams.ldmParams, 3256 src, srcSize), ""); 3257 /* Updates ldmSeqStore.pos */ 3258 lastLLSize = 3259 ZSTD_ldm_blockCompress(&ldmSeqStore, 3260 ms, &zc->seqStore, 3261 zc->blockState.nextCBlock->rep, 3262 zc->appliedParams.useRowMatchFinder, 3263 src, srcSize); 3264 assert(ldmSeqStore.pos == ldmSeqStore.size); 3265 } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) { 3266 assert( 3267 zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize) 3268 ); 3269 assert(zc->appliedParams.extSeqProdFunc != NULL); 3270 3271 { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; 3272 3273 size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)( 3274 zc->appliedParams.extSeqProdState, 3275 zc->extSeqBuf, 3276 zc->extSeqBufCapacity, 3277 src, srcSize, 3278 NULL, 0, /* dict and dictSize, currently not supported */ 3279 zc->appliedParams.compressionLevel, 3280 windowSize 3281 ); 3282 3283 size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( 3284 zc->extSeqBuf, 3285 nbExternalSeqs, 3286 zc->extSeqBufCapacity, 3287 srcSize 3288 ); 3289 3290 /* Return early if there is no error, since we don't need to worry about last literals */ 3291 if (!ZSTD_isError(nbPostProcessedSeqs)) { 3292 ZSTD_SequencePosition seqPos = {0,0,0}; 3293 size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); 3294 RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); 3295 FORWARD_IF_ERROR( 3296 ZSTD_transferSequences_wBlockDelim( 3297 zc, &seqPos, 3298 zc->extSeqBuf, nbPostProcessedSeqs, 3299 src, srcSize, 3300 zc->appliedParams.searchForExternalRepcodes 3301 ), 3302 "Failed to copy external sequences to seqStore!" 3303 ); 3304 ms->ldmSeqStore = NULL; 3305 DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); 3306 return ZSTDbss_compress; 3307 } 3308 3309 /* Propagate the error if fallback is disabled */ 3310 if (!zc->appliedParams.enableMatchFinderFallback) { 3311 return nbPostProcessedSeqs; 3312 } 3313 3314 /* Fallback to software matchfinder */ 3315 { ZSTD_BlockCompressor_f const blockCompressor = 3316 ZSTD_selectBlockCompressor( 3317 zc->appliedParams.cParams.strategy, 3318 zc->appliedParams.useRowMatchFinder, 3319 dictMode); 3320 ms->ldmSeqStore = NULL; 3321 DEBUGLOG( 3322 5, 3323 "External sequence producer returned error code %lu. 
Falling back to internal parser.", 3324 (unsigned long)nbExternalSeqs 3325 ); 3326 lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); 3327 } } 3328 } else { /* not long range mode and no external matchfinder */ 3329 ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor( 3330 zc->appliedParams.cParams.strategy, 3331 zc->appliedParams.useRowMatchFinder, 3332 dictMode); 3333 ms->ldmSeqStore = NULL; 3334 lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); 3335 } 3336 { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; 3337 ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); 3338 } } 3339 ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); 3340 return ZSTDbss_compress; 3341 } 3342 3343 static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) 3344 { 3345 const SeqDef* inSeqs = seqStore->sequencesStart; 3346 const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs); 3347 const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); 3348 3349 ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; 3350 const size_t nbOutSequences = nbInSequences + 1; 3351 size_t nbOutLiterals = 0; 3352 Repcodes_t repcodes; 3353 size_t i; 3354 3355 /* Bounds check that we have enough space for every input sequence 3356 * and the block delimiter 3357 */ 3358 assert(seqCollector->seqIndex <= seqCollector->maxSequences); 3359 RETURN_ERROR_IF( 3360 nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex), 3361 dstSize_tooSmall, 3362 "Not enough space to copy sequences"); 3363 3364 ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes)); 3365 for (i = 0; i < nbInSequences; ++i) { 3366 U32 rawOffset; 3367 outSeqs[i].litLength = inSeqs[i].litLength; 3368 outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH; 3369 outSeqs[i].rep = 0; 3370 3371 /* Handle the possible single length >= 64K 3372 * There can only be one because we add MINMATCH to every match length, 3373 * and blocks are at most 128K. 3374 */ 3375 if (i == seqStore->longLengthPos) { 3376 if (seqStore->longLengthType == ZSTD_llt_literalLength) { 3377 outSeqs[i].litLength += 0x10000; 3378 } else if (seqStore->longLengthType == ZSTD_llt_matchLength) { 3379 outSeqs[i].matchLength += 0x10000; 3380 } 3381 } 3382 3383 /* Determine the raw offset given the offBase, which may be a repcode. */ 3384 if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) { 3385 const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase); 3386 assert(repcode > 0); 3387 outSeqs[i].rep = repcode; 3388 if (outSeqs[i].litLength != 0) { 3389 rawOffset = repcodes.rep[repcode - 1]; 3390 } else { 3391 if (repcode == 3) { 3392 assert(repcodes.rep[0] > 1); 3393 rawOffset = repcodes.rep[0] - 1; 3394 } else { 3395 rawOffset = repcodes.rep[repcode]; 3396 } 3397 } 3398 } else { 3399 rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase); 3400 } 3401 outSeqs[i].offset = rawOffset; 3402 3403 /* Update repcode history for the sequence */ 3404 ZSTD_updateRep(repcodes.rep, 3405 inSeqs[i].offBase, 3406 inSeqs[i].litLength == 0); 3407 3408 nbOutLiterals += outSeqs[i].litLength; 3409 } 3410 /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. 
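 * For example, if 7 literal bytes remain after the final match, the delimiter
 * emitted here is (of: 0, ml: 0, ll: 7).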
* If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
3412 * for the block boundary, according to the API.
3413 */
3414 assert(nbInLiterals >= nbOutLiterals);
3415 {
3416 const size_t lastLLSize = nbInLiterals - nbOutLiterals;
3417 outSeqs[nbInSequences].litLength = (U32)lastLLSize;
3418 outSeqs[nbInSequences].matchLength = 0;
3419 outSeqs[nbInSequences].offset = 0;
3420 assert(nbOutSequences == nbInSequences + 1);
3421 }
3422 seqCollector->seqIndex += nbOutSequences;
3423 assert(seqCollector->seqIndex <= seqCollector->maxSequences);
3424
3425 return 0;
3426 }
3427
3428 size_t ZSTD_sequenceBound(size_t srcSize) {
3429 const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1;
3430 const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1;
3431 return maxNbSeq + maxNbDelims;
3432 }
3433
3434 size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
3435 size_t outSeqsSize, const void* src, size_t srcSize)
3436 {
3437 const size_t dstCapacity = ZSTD_compressBound(srcSize);
3438 void* dst; /* Make C90 happy. */
3439 SeqCollector seqCollector;
3440 {
3441 int targetCBlockSize;
3442 FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), "");
3443 RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0");
3444 }
3445 {
3446 int nbWorkers;
3447 FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), "");
3448 RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
3449 }
3450
3451 dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
3452 RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
3453
3454 seqCollector.collectSequences = 1;
3455 seqCollector.seqStart = outSeqs;
3456 seqCollector.seqIndex = 0;
3457 seqCollector.maxSequences = outSeqsSize;
3458 zc->seqCollector = seqCollector;
3459
3460 {
3461 const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
3462 ZSTD_customFree(dst, ZSTD_defaultCMem);
3463 FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
3464 }
3465 assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));
3466 return zc->seqCollector.seqIndex;
3467 }
3468
3469 size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
3470 size_t in = 0;
3471 size_t out = 0;
3472 for (; in < seqsSize; ++in) {
3473 if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
3474 if (in != seqsSize - 1) {
3475 sequences[in+1].litLength += sequences[in].litLength;
3476 }
3477 } else {
3478 sequences[out] = sequences[in];
3479 ++out;
3480 }
3481 }
3482 return out;
3483 }
3484
3485 /* Unrolled loop to read four size_ts of input at a time. Returns 1 if the input is RLE, 0 if not.
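 * The test broadcasts the first byte across a machine word by multiplying with
 * 0x0101010101010101 (e.g. value 0x41 becomes 0x4141414141414141 on 64-bit
 * targets), then compares four words of input per iteration against that pattern.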
*/
3486 static int ZSTD_isRLE(const BYTE* src, size_t length) {
3487 const BYTE* ip = src;
3488 const BYTE value = ip[0];
3489 const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
3490 const size_t unrollSize = sizeof(size_t) * 4;
3491 const size_t unrollMask = unrollSize - 1;
3492 const size_t prefixLength = length & unrollMask;
3493 size_t i;
3494 if (length == 1) return 1;
3495 /* Check if the prefix is RLE before using the unrolled loop */
3496 if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
3497 return 0;
3498 }
3499 for (i = prefixLength; i != length; i += unrollSize) {
3500 size_t u;
3501 for (u = 0; u < unrollSize; u += sizeof(size_t)) {
3502 if (MEM_readST(ip + i + u) != valueST) {
3503 return 0;
3504 } } }
3505 return 1;
3506 }
3507
3508 /* Returns true if the given block may be RLE.
3509 * This is just a heuristic based on the compressibility.
3510 * It may return both false positives and false negatives.
3511 */
3512 static int ZSTD_maybeRLE(SeqStore_t const* seqStore)
3513 {
3514 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
3515 size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
3516
3517 return nbSeqs < 4 && nbLits < 10;
3518 }
3519
3520 static void
3521 ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
3522 {
3523 ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
3524 bs->prevCBlock = bs->nextCBlock;
3525 bs->nextCBlock = tmp;
3526 }
3527
3528 /* Writes the block header */
3529 static void
3530 writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
3531 {
3532 U32 const cBlockHeader = cSize == 1 ?
3533 lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
3534 lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
3535 MEM_writeLE24(op, cBlockHeader);
3536 DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
3537 }
3538
3539 /* ZSTD_buildBlockEntropyStats_literals() :
3540 * Builds entropy for the literals.
3541 * Stores literals block type (raw, rle, compressed, repeat) and
3542 * huffman description table to hufMetadata.
3543 * Requires ENTROPY_WORKSPACE_SIZE workspace.
3544 * @return : size of huffman description table, or an error code
3545 */
3546 static size_t
3547 ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
3548 const ZSTD_hufCTables_t* prevHuf,
3549 ZSTD_hufCTables_t* nextHuf,
3550 ZSTD_hufCTablesMetadata_t* hufMetadata,
3551 const int literalsCompressionIsDisabled,
3552 void* workspace, size_t wkspSize,
3553 int hufFlags)
3554 {
3555 BYTE* const wkspStart = (BYTE*)workspace;
3556 BYTE* const wkspEnd = wkspStart + wkspSize;
3557 BYTE* const countWkspStart = wkspStart;
3558 unsigned* const countWksp = (unsigned*)workspace;
3559 const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
3560 BYTE* const nodeWksp = countWkspStart + countWkspSize;
3561 const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);
3562 unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
3563 unsigned huffLog = LitHufLog;
3564 HUF_repeat repeat = prevHuf->repeatMode;
3565 DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
3566
3567 /* Prepare nextEntropy, assuming the existing table gets reused */
3568 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
3569
3570 if (literalsCompressionIsDisabled) {
3571 DEBUGLOG(5, "set_basic - disabled");
3572 hufMetadata->hType = set_basic;
3573 return 0;
3574 }
3575
3576 /* too small ?
don't even attempt compression (speed opt) */ 3577 #ifndef COMPRESS_LITERALS_SIZE_MIN 3578 # define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ 3579 #endif 3580 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; 3581 if (srcSize <= minLitSize) { 3582 DEBUGLOG(5, "set_basic - too small"); 3583 hufMetadata->hType = set_basic; 3584 return 0; 3585 } } 3586 3587 /* Scan input and build symbol stats */ 3588 { size_t const largest = 3589 HIST_count_wksp (countWksp, &maxSymbolValue, 3590 (const BYTE*)src, srcSize, 3591 workspace, wkspSize); 3592 FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); 3593 if (largest == srcSize) { 3594 /* only one literal symbol */ 3595 DEBUGLOG(5, "set_rle"); 3596 hufMetadata->hType = set_rle; 3597 return 0; 3598 } 3599 if (largest <= (srcSize >> 7)+4) { 3600 /* heuristic: likely not compressible */ 3601 DEBUGLOG(5, "set_basic - no gain"); 3602 hufMetadata->hType = set_basic; 3603 return 0; 3604 } } 3605 3606 /* Validate the previous Huffman table */ 3607 if (repeat == HUF_repeat_check 3608 && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { 3609 repeat = HUF_repeat_none; 3610 } 3611 3612 /* Build Huffman Tree */ 3613 ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); 3614 huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); 3615 assert(huffLog <= LitHufLog); 3616 { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, 3617 maxSymbolValue, huffLog, 3618 nodeWksp, nodeWkspSize); 3619 FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); 3620 huffLog = (U32)maxBits; 3621 } 3622 { /* Build and write the CTable */ 3623 size_t const newCSize = HUF_estimateCompressedSize( 3624 (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); 3625 size_t const hSize = HUF_writeCTable_wksp( 3626 hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), 3627 (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, 3628 nodeWksp, nodeWkspSize); 3629 /* Check against repeating the previous CTable */ 3630 if (repeat != HUF_repeat_none) { 3631 size_t const oldCSize = HUF_estimateCompressedSize( 3632 (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); 3633 if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { 3634 DEBUGLOG(5, "set_repeat - smaller"); 3635 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); 3636 hufMetadata->hType = set_repeat; 3637 return 0; 3638 } } 3639 if (newCSize + hSize >= srcSize) { 3640 DEBUGLOG(5, "set_basic - no gains"); 3641 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); 3642 hufMetadata->hType = set_basic; 3643 return 0; 3644 } 3645 DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); 3646 hufMetadata->hType = set_compressed; 3647 nextHuf->repeatMode = HUF_repeat_check; 3648 return hSize; 3649 } 3650 } 3651 3652 3653 /* ZSTD_buildDummySequencesStatistics(): 3654 * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, 3655 * and updates nextEntropy to the appropriate repeatMode. 
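 * Employed by ZSTD_buildBlockEntropyStats_sequences() below when the block
 * contains no sequences at all (a literals-only block).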
3656 */ 3657 static ZSTD_symbolEncodingTypeStats_t 3658 ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) 3659 { 3660 ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; 3661 nextEntropy->litlength_repeatMode = FSE_repeat_none; 3662 nextEntropy->offcode_repeatMode = FSE_repeat_none; 3663 nextEntropy->matchlength_repeatMode = FSE_repeat_none; 3664 return stats; 3665 } 3666 3667 /* ZSTD_buildBlockEntropyStats_sequences() : 3668 * Builds entropy for the sequences. 3669 * Stores symbol compression modes and fse table to fseMetadata. 3670 * Requires ENTROPY_WORKSPACE_SIZE wksp. 3671 * @return : size of fse tables or error code */ 3672 static size_t 3673 ZSTD_buildBlockEntropyStats_sequences( 3674 const SeqStore_t* seqStorePtr, 3675 const ZSTD_fseCTables_t* prevEntropy, 3676 ZSTD_fseCTables_t* nextEntropy, 3677 const ZSTD_CCtx_params* cctxParams, 3678 ZSTD_fseCTablesMetadata_t* fseMetadata, 3679 void* workspace, size_t wkspSize) 3680 { 3681 ZSTD_strategy const strategy = cctxParams->cParams.strategy; 3682 size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); 3683 BYTE* const ostart = fseMetadata->fseTablesBuffer; 3684 BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); 3685 BYTE* op = ostart; 3686 unsigned* countWorkspace = (unsigned*)workspace; 3687 unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1); 3688 size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace); 3689 ZSTD_symbolEncodingTypeStats_t stats; 3690 3691 DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq); 3692 stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, 3693 prevEntropy, nextEntropy, op, oend, 3694 strategy, countWorkspace, 3695 entropyWorkspace, entropyWorkspaceSize) 3696 : ZSTD_buildDummySequencesStatistics(nextEntropy); 3697 FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); 3698 fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype; 3699 fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype; 3700 fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype; 3701 fseMetadata->lastCountSize = stats.lastCountSize; 3702 return stats.size; 3703 } 3704 3705 3706 /* ZSTD_buildBlockEntropyStats() : 3707 * Builds entropy for the block. 3708 * Requires workspace size ENTROPY_WORKSPACE_SIZE 3709 * @return : 0 on success, or an error code 3710 * Note : also employed in superblock 3711 */ 3712 size_t ZSTD_buildBlockEntropyStats( 3713 const SeqStore_t* seqStorePtr, 3714 const ZSTD_entropyCTables_t* prevEntropy, 3715 ZSTD_entropyCTables_t* nextEntropy, 3716 const ZSTD_CCtx_params* cctxParams, 3717 ZSTD_entropyCTablesMetadata_t* entropyMetadata, 3718 void* workspace, size_t wkspSize) 3719 { 3720 size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); 3721 int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); 3722 int const hufFlags = huf_useOptDepth ? 
HUF_flags_optimalDepth : 0; 3723 3724 entropyMetadata->hufMetadata.hufDesSize = 3725 ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, 3726 &prevEntropy->huf, &nextEntropy->huf, 3727 &entropyMetadata->hufMetadata, 3728 ZSTD_literalsCompressionIsDisabled(cctxParams), 3729 workspace, wkspSize, hufFlags); 3730 3731 FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); 3732 entropyMetadata->fseMetadata.fseTablesSize = 3733 ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, 3734 &prevEntropy->fse, &nextEntropy->fse, 3735 cctxParams, 3736 &entropyMetadata->fseMetadata, 3737 workspace, wkspSize); 3738 FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed"); 3739 return 0; 3740 } 3741 3742 /* Returns the size estimate for the literals section (header + content) of a block */ 3743 static size_t 3744 ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, 3745 const ZSTD_hufCTables_t* huf, 3746 const ZSTD_hufCTablesMetadata_t* hufMetadata, 3747 void* workspace, size_t wkspSize, 3748 int writeEntropy) 3749 { 3750 unsigned* const countWksp = (unsigned*)workspace; 3751 unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; 3752 size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB); 3753 U32 singleStream = litSize < 256; 3754 3755 if (hufMetadata->hType == set_basic) return litSize; 3756 else if (hufMetadata->hType == set_rle) return 1; 3757 else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { 3758 size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); 3759 if (ZSTD_isError(largest)) return litSize; 3760 { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); 3761 if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; 3762 if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */ 3763 return cLitSizeEstimate + literalSectionHeaderSize; 3764 } } 3765 assert(0); /* impossible */ 3766 return 0; 3767 } 3768 3769 /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ 3770 static size_t 3771 ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, 3772 const BYTE* codeTable, size_t nbSeq, unsigned maxCode, 3773 const FSE_CTable* fseCTable, 3774 const U8* additionalBits, 3775 short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, 3776 void* workspace, size_t wkspSize) 3777 { 3778 unsigned* const countWksp = (unsigned*)workspace; 3779 const BYTE* ctp = codeTable; 3780 const BYTE* const ctStart = ctp; 3781 const BYTE* const ctEnd = ctStart + nbSeq; 3782 size_t cSymbolTypeSizeEstimateInBits = 0; 3783 unsigned max = maxCode; 3784 3785 HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ 3786 if (type == set_basic) { 3787 /* We selected this encoding type, so it must be valid. 
*/ 3788 assert(max <= defaultMax); 3789 (void)defaultMax; 3790 cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); 3791 } else if (type == set_rle) { 3792 cSymbolTypeSizeEstimateInBits = 0; 3793 } else if (type == set_compressed || type == set_repeat) { 3794 cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); 3795 } 3796 if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) { 3797 return nbSeq * 10; 3798 } 3799 while (ctp < ctEnd) { 3800 if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; 3801 else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ 3802 ctp++; 3803 } 3804 return cSymbolTypeSizeEstimateInBits >> 3; 3805 } 3806 3807 /* Returns the size estimate for the sequences section (header + content) of a block */ 3808 static size_t 3809 ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, 3810 const BYTE* llCodeTable, 3811 const BYTE* mlCodeTable, 3812 size_t nbSeq, 3813 const ZSTD_fseCTables_t* fseTables, 3814 const ZSTD_fseCTablesMetadata_t* fseMetadata, 3815 void* workspace, size_t wkspSize, 3816 int writeEntropy) 3817 { 3818 size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); 3819 size_t cSeqSizeEstimate = 0; 3820 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, 3821 fseTables->offcodeCTable, NULL, 3822 OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, 3823 workspace, wkspSize); 3824 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, 3825 fseTables->litlengthCTable, LL_bits, 3826 LL_defaultNorm, LL_defaultNormLog, MaxLL, 3827 workspace, wkspSize); 3828 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, 3829 fseTables->matchlengthCTable, ML_bits, 3830 ML_defaultNorm, ML_defaultNormLog, MaxML, 3831 workspace, wkspSize); 3832 if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; 3833 return cSeqSizeEstimate + sequencesSectionHeaderSize; 3834 } 3835 3836 /* Returns the size estimate for a given stream of literals, of, ll, ml */ 3837 static size_t 3838 ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, 3839 const BYTE* ofCodeTable, 3840 const BYTE* llCodeTable, 3841 const BYTE* mlCodeTable, 3842 size_t nbSeq, 3843 const ZSTD_entropyCTables_t* entropy, 3844 const ZSTD_entropyCTablesMetadata_t* entropyMetadata, 3845 void* workspace, size_t wkspSize, 3846 int writeLitEntropy, int writeSeqEntropy) 3847 { 3848 size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, 3849 &entropy->huf, &entropyMetadata->hufMetadata, 3850 workspace, wkspSize, writeLitEntropy); 3851 size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, 3852 nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, 3853 workspace, wkspSize, writeSeqEntropy); 3854 return seqSize + literalsSize + ZSTD_blockHeaderSize; 3855 } 3856 3857 /* Builds entropy statistics and uses them for blocksize estimation. 3858 * 3859 * @return: estimated compressed size of the seqStore, or a zstd error. 
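 * The block splitter relies on this estimate to decide whether compressing
 * two halves separately beats compressing the sequences as a single block.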
3860 */ 3861 static size_t 3862 ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc) 3863 { 3864 ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; 3865 DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); 3866 FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, 3867 &zc->blockState.prevCBlock->entropy, 3868 &zc->blockState.nextCBlock->entropy, 3869 &zc->appliedParams, 3870 entropyMetadata, 3871 zc->tmpWorkspace, zc->tmpWkspSize), ""); 3872 return ZSTD_estimateBlockSize( 3873 seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), 3874 seqStore->ofCode, seqStore->llCode, seqStore->mlCode, 3875 (size_t)(seqStore->sequences - seqStore->sequencesStart), 3876 &zc->blockState.nextCBlock->entropy, 3877 entropyMetadata, 3878 zc->tmpWorkspace, zc->tmpWkspSize, 3879 (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); 3880 } 3881 3882 /* Returns literals bytes represented in a seqStore */ 3883 static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore) 3884 { 3885 size_t literalsBytes = 0; 3886 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); 3887 size_t i; 3888 for (i = 0; i < nbSeqs; ++i) { 3889 SeqDef const seq = seqStore->sequencesStart[i]; 3890 literalsBytes += seq.litLength; 3891 if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { 3892 literalsBytes += 0x10000; 3893 } } 3894 return literalsBytes; 3895 } 3896 3897 /* Returns match bytes represented in a seqStore */ 3898 static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore) 3899 { 3900 size_t matchBytes = 0; 3901 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); 3902 size_t i; 3903 for (i = 0; i < nbSeqs; ++i) { 3904 SeqDef seq = seqStore->sequencesStart[i]; 3905 matchBytes += seq.mlBase + MINMATCH; 3906 if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { 3907 matchBytes += 0x10000; 3908 } } 3909 return matchBytes; 3910 } 3911 3912 /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). 3913 * Stores the result in resultSeqStore. 
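 * For example, startIdx==0 with endIdx==nbSeq/2 yields a view over the first
 * half of the block's sequences, with literal pointers rebased onto the bytes
 * that half actually consumes.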
3914 */ 3915 static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, 3916 const SeqStore_t* originalSeqStore, 3917 size_t startIdx, size_t endIdx) 3918 { 3919 *resultSeqStore = *originalSeqStore; 3920 if (startIdx > 0) { 3921 resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; 3922 resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); 3923 } 3924 3925 /* Move longLengthPos into the correct position if necessary */ 3926 if (originalSeqStore->longLengthType != ZSTD_llt_none) { 3927 if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) { 3928 resultSeqStore->longLengthType = ZSTD_llt_none; 3929 } else { 3930 resultSeqStore->longLengthPos -= (U32)startIdx; 3931 } 3932 } 3933 resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; 3934 resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; 3935 if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { 3936 /* This accounts for possible last literals if the derived chunk reaches the end of the block */ 3937 assert(resultSeqStore->lit == originalSeqStore->lit); 3938 } else { 3939 size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); 3940 resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; 3941 } 3942 resultSeqStore->llCode += startIdx; 3943 resultSeqStore->mlCode += startIdx; 3944 resultSeqStore->ofCode += startIdx; 3945 } 3946 3947 /* 3948 * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. 3949 * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). 3950 */ 3951 static U32 3952 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) 3953 { 3954 U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ 3955 assert(OFFBASE_IS_REPCODE(offBase)); 3956 if (adjustedRepCode == ZSTD_REP_NUM) { 3957 assert(ll0); 3958 /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 3959 * This is only valid if it results in a valid offset value, aka > 0. 3960 * Note : it may happen that `rep[0]==1` in exceptional circumstances. 3961 * In which case this function will return 0, which is an invalid offset. 3962 * It's not an issue though, since this value will be 3963 * compared and discarded within ZSTD_seqStore_resolveOffCodes(). 3964 */ 3965 return rep[0] - 1; 3966 } 3967 return rep[adjustedRepCode]; 3968 } 3969 3970 /* 3971 * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise 3972 * due to emission of RLE/raw blocks that disturb the offset history, 3973 * and replaces any repcodes within the seqStore that may be invalid. 3974 * 3975 * dRepcodes are updated as would be on the decompression side. 3976 * cRepcodes are updated exactly in accordance with the seqStore. 3977 * 3978 * Note : this function assumes seq->offBase respects the following numbering scheme : 3979 * 0 : invalid 3980 * 1-3 : repcode 1-3 3981 * 4+ : real_offset+3 3982 */ 3983 static void 3984 ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes, 3985 const SeqStore_t* const seqStore, U32 const nbSeq) 3986 { 3987 U32 idx = 0; 3988 U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? 
seqStore->longLengthPos : nbSeq; 3989 for (; idx < nbSeq; ++idx) { 3990 SeqDef* const seq = seqStore->sequencesStart + idx; 3991 U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); 3992 U32 const offBase = seq->offBase; 3993 assert(offBase > 0); 3994 if (OFFBASE_IS_REPCODE(offBase)) { 3995 U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); 3996 U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); 3997 /* Adjust simulated decompression repcode history if we come across a mismatch. Replace 3998 * the repcode with the offset it actually references, determined by the compression 3999 * repcode history. 4000 */ 4001 if (dRawOffset != cRawOffset) { 4002 seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); 4003 } 4004 } 4005 /* Compression repcode history is always updated with values directly from the unmodified seqStore. 4006 * Decompression repcode history may use modified seq->offset value taken from compression repcode history. 4007 */ 4008 ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); 4009 ZSTD_updateRep(cRepcodes->rep, offBase, ll0); 4010 } 4011 } 4012 4013 /* ZSTD_compressSeqStore_singleBlock(): 4014 * Compresses a seqStore into a block with a block header, into the buffer dst. 4015 * 4016 * Returns the total size of that block (including header) or a ZSTD error code. 4017 */ 4018 static size_t 4019 ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, 4020 const SeqStore_t* const seqStore, 4021 Repcodes_t* const dRep, Repcodes_t* const cRep, 4022 void* dst, size_t dstCapacity, 4023 const void* src, size_t srcSize, 4024 U32 lastBlock, U32 isPartition) 4025 { 4026 const U32 rleMaxLength = 25; 4027 BYTE* op = (BYTE*)dst; 4028 const BYTE* ip = (const BYTE*)src; 4029 size_t cSize; 4030 size_t cSeqsSize; 4031 4032 /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ 4033 Repcodes_t const dRepOriginal = *dRep; 4034 DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); 4035 if (isPartition) 4036 ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); 4037 4038 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit"); 4039 cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, 4040 &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, 4041 &zc->appliedParams, 4042 op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, 4043 srcSize, 4044 zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, 4045 zc->bmi2); 4046 FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); 4047 4048 if (!zc->isFirstBlock && 4049 cSeqsSize < rleMaxLength && 4050 ZSTD_isRLE((BYTE const*)src, srcSize)) { 4051 /* We don't want to emit our first block as a RLE even if it qualifies because 4052 * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
4053 * This is only an issue for zstd <= v1.4.3 4054 */ 4055 cSeqsSize = 1; 4056 } 4057 4058 /* Sequence collection not supported when block splitting */ 4059 if (zc->seqCollector.collectSequences) { 4060 FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed"); 4061 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); 4062 return 0; 4063 } 4064 4065 if (cSeqsSize == 0) { 4066 cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); 4067 FORWARD_IF_ERROR(cSize, "Nocompress block failed"); 4068 DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize); 4069 *dRep = dRepOriginal; /* reset simulated decompression repcode history */ 4070 } else if (cSeqsSize == 1) { 4071 cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); 4072 FORWARD_IF_ERROR(cSize, "RLE compress block failed"); 4073 DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize); 4074 *dRep = dRepOriginal; /* reset simulated decompression repcode history */ 4075 } else { 4076 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); 4077 writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); 4078 cSize = ZSTD_blockHeaderSize + cSeqsSize; 4079 DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize); 4080 } 4081 4082 if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) 4083 zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; 4084 4085 return cSize; 4086 } 4087 4088 /* Struct to keep track of where we are in our recursive calls. */ 4089 typedef struct { 4090 U32* splitLocations; /* Array of split indices */ 4091 size_t idx; /* The current index within splitLocations being worked on */ 4092 } seqStoreSplits; 4093 4094 #define MIN_SEQUENCES_BLOCK_SPLITTING 300 4095 4096 /* Helper function to perform the recursive search for block splits. 4097 * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. 4098 * If advantageous to split, then we recurse down the two sub-blocks. 4099 * If not, or if an error occurred in estimation, then we do not recurse. 4100 * 4101 * Note: The recursion depth is capped by a heuristic minimum number of sequences, 4102 * defined by MIN_SEQUENCES_BLOCK_SPLITTING. 4103 * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). 4104 * In practice, recursion depth usually doesn't go beyond 4. 4105 * 4106 * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. 4107 * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize 4108 * maximum of 128 KB, this value is actually impossible to reach. 
4109 */ 4110 static void 4111 ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, 4112 ZSTD_CCtx* zc, const SeqStore_t* origSeqStore) 4113 { 4114 SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; 4115 SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; 4116 SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; 4117 size_t estimatedOriginalSize; 4118 size_t estimatedFirstHalfSize; 4119 size_t estimatedSecondHalfSize; 4120 size_t midIdx = (startIdx + endIdx)/2; 4121 4122 DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); 4123 assert(endIdx >= startIdx); 4124 if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { 4125 DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); 4126 return; 4127 } 4128 ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); 4129 ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); 4130 ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); 4131 estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); 4132 estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); 4133 estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); 4134 DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", 4135 estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); 4136 if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { 4137 return; 4138 } 4139 if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { 4140 DEBUGLOG(5, "split decided at seqNb:%zu", midIdx); 4141 ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); 4142 splits->splitLocations[splits->idx] = (U32)midIdx; 4143 splits->idx++; 4144 ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore); 4145 } 4146 } 4147 4148 /* Base recursive function. 4149 * Populates a table with intra-block partition indices that can improve compression ratio. 4150 * 4151 * @return: number of splits made (which equals the size of the partition table - 1). 4152 */ 4153 static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) 4154 { 4155 seqStoreSplits splits; 4156 splits.splitLocations = partitions; 4157 splits.idx = 0; 4158 if (nbSeq <= 4) { 4159 DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); 4160 /* Refuse to try and split anything with less than 4 sequences */ 4161 return 0; 4162 } 4163 ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore); 4164 splits.splitLocations[splits.idx] = nbSeq; 4165 DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1); 4166 return splits.idx; 4167 } 4168 4169 /* ZSTD_compressBlock_splitBlock(): 4170 * Attempts to split a given block into multiple blocks to improve compression ratio. 4171 * 4172 * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
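 * Split points come from ZSTD_deriveBlockSplits() ; each resulting partition
 * is then emitted back-to-back by ZSTD_compressSeqStore_singleBlock(), each
 * with its own block header.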
 */
static size_t
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t blockSize,
                                       U32 lastBlock, U32 nbSeq)
{
    size_t cSize = 0;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    size_t i = 0;
    size_t srcBytesTotal = 0;
    U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
    SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
    SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
    size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);

    /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
     * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
     * separate repcode histories that simulate repcode history on compression and decompression side,
     * and use the histories to determine whether we must replace a particular repcode with its raw offset.
     *
     * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
     *    or RLE. This allows us to retrieve the offset value that an invalid repcode references within
     *    a nocompress/RLE block.
     * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
     *    the replacement offset value rather than the original repcode to update the repcode history.
     *    dRep also will be the final repcode history sent to the next block.
     *
     * See ZSTD_seqStore_resolveOffCodes() for more details.
     */
    Repcodes_t dRep;
    Repcodes_t cRep;
    ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
    ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
    ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t));

    DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate);

    if (numSplits == 0) {
        size_t cSizeSingleBlock =
                ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
                                                  &dRep, &cRep,
                                                  op, dstCapacity,
                                                  ip, blockSize,
                                                  lastBlock, 0 /* isPartition */);
        FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
        DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
        assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
        assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize);
        return cSizeSingleBlock;
    }

    ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
    for (i = 0; i <= numSplits; ++i) {
        size_t cSizeChunk;
        U32 const lastPartition = (i == numSplits);
        U32 lastBlockEntireSrc = 0;

        size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
        srcBytesTotal += srcBytes;
        if (lastPartition) {
            /* This is the final partition, need to account for possible last literals */
            srcBytes += blockSize - srcBytesTotal;
            lastBlockEntireSrc = lastBlock;
        } else {
            ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
        }

        cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
                                                       &dRep, &cRep,
                                                       op, dstCapacity,
                                                       ip, srcBytes,
                                                       lastBlockEntireSrc, 1 /* isPartition */);
        DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size",
                    ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
        FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");

        ip += srcBytes;
        op += cSizeChunk;
        dstCapacity -= cSizeChunk;
        cSize += cSizeChunk;
        *currSeqStore = *nextSeqStore;
        assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize);
    }
    /* cRep and dRep may have diverged during the compression.
     * If so, we use the dRep repcodes for the next block.
     */
    ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));
    return cSize;
}

static size_t
ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize, U32 lastBlock)
{
    U32 nbSeq;
    size_t cSize;
    DEBUGLOG(5, "ZSTD_compressBlock_splitBlock");
    assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) {
            if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
            RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
            cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
            FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
            DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block");
            return cSize;
        }
        nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
    }

    cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
    FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
    return cSize;
}
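/* Worked example (illustrative, not part of the library): splitting one
 * 128 KB block into 4 partitions emits 4 independent blocks, i.e. 3 extra
 * block headers of ZSTD_blockHeaderSize (3 bytes) each. Splitting only pays
 * off when entropy tables tuned per partition save more than those 9 extra
 * header bytes.
 */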
static size_t
ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize, U32 frame)
{
    /* This is an estimated upper bound for the length of an RLE block.
     * It isn't the actual upper bound;
     * finding the real threshold needs further investigation.
     */
    const U32 rleMaxLength = 25;
    size_t cSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) {
            RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
            cSize = 0;
            goto out;
        }
    }

    if (zc->seqCollector.collectSequences) {
        FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        return 0;
    }

    /* encode sequences and literals */
    cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
            zc->bmi2);

    if (frame &&
        /* We don't want to emit our first block as an RLE block even if it
         * qualifies, because doing so will cause the decoder (cli only) to
         * throw a "should consume all input" error.
         * This is only an issue for zstd <= v1.4.3.
         */
        !zc->isFirstBlock &&
        cSize < rleMaxLength &&
        ZSTD_isRLE(ip, srcSize))
    {
        cSize = 1;
        op[0] = ip[0];
    }

out:
    if (!ZSTD_isError(cSize) && cSize > 1) {
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}
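/* Usage sketch (illustrative, not part of the library): the superblock path
 * implemented below is driven by the ZSTD_c_targetCBlockSize parameter.
 * Error handling is elided, and the target value is just an example:
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1340);
 *     cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *
 * Blocks are then emitted as sequences of sub-blocks aiming at that
 * compressed size, which helps latency-sensitive, packet-based transports.
 */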
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const size_t bss, U32 lastBlock)
{
    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
    if (bss == ZSTDbss_compress) {
        if (/* We don't want to emit our first block as an RLE block even if it
             * qualifies, because doing so will cause the decoder (cli only) to
             * throw a "should consume all input" error.
             * This is only an issue for zstd <= v1.4.3.
             */
            !zc->isFirstBlock &&
            ZSTD_maybeRLE(&zc->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize))
        {
            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
        }
        /* Attempt superblock compression.
         *
         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
         * standard ZSTD_compressBound(). This is a problem, because even if we have
         * space now, taking an extra byte now could cause us to run out of space later
         * and violate ZSTD_compressBound().
         *
         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
         *
         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
         * uncompressed block in these cases:
         * * cSize == 0: Return code for an uncompressed block.
         * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
         *   ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
         *   output space.
         * * cSize >= blockBound(srcSize): We have expanded the block too much so
         *   emit an uncompressed block.
         */
        {   size_t const cSize =
                ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
            if (cSize != ERROR(dstSize_tooSmall)) {
                size_t const maxCSize =
                    srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
                    ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
                    return cSize;
                }
            }
        }
    } /* if (bss == ZSTDbss_compress) */

    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
    /* Superblock compression failed, attempt to emit a single no compress block.
     * The decoder will be able to stream this block since it is uncompressed.
     */
    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}

static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               U32 lastBlock)
{
    size_t cSize = 0;
    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");

    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");

    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms,
                                         ZSTD_cwksp* ws,
                                         ZSTD_CCtx_params const* params,
                                         void const* ip,
                                         void const* iend)
{
    U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
    U32 const maxDist = (U32)1 << params->cParams.windowLog;
    if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
        ZSTD_cwksp_mark_tables_dirty(ws);
        ZSTD_reduceIndex(ms, params, correction);
        ZSTD_cwksp_mark_tables_clean(ws);
        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
        else ms->nextToUpdate -= correction;
        /* invalidate dictionaries on overflow correction */
        ms->loadedDictEnd = 0;
        ms->dictMatchState = NULL;
    }
}

#include "zstd_preSplit.h"
static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel,
                                    ZSTD_strategy strat, S64 savings)
{
    /* split level based on compression strategy, from `fast` to `btultra2` */
    static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 };
    /* note: conservatively only split full blocks (128 KB) currently.
     * While it's possible to go lower, let's keep it simple for a first implementation.
     * Besides, benefits of splitting are reduced when blocks are already small.
     */
    if (srcSize < 128 KB || blockSizeMax < 128 KB)
        return MIN(srcSize, blockSizeMax);
    /* do not split incompressible data though:
     * require verified savings to allow pre-splitting.
     * Note: as a consequence, the first full block is not split.
     */
    if (savings < 3) {
        DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings);
        return 128 KB;
    }
    /* apply @splitLevel, or use default value (which depends on @strat).
     * note that the splitting heuristic is still conditioned by @savings >= 3,
     * so the first block will not reach this code path */
    if (splitLevel == 1) return 128 KB;
    if (splitLevel == 0) {
        assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2);
        splitLevel = splitLevels[strat];
    } else {
        assert(2 <= splitLevel && splitLevel <= 6);
        splitLevel -= 2;
    }
    return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize);
}

/*! ZSTD_compress_frameChunk() :
 *  Compress a chunk of data into one or multiple blocks.
 *  All blocks will be terminated, all input will be consumed.
 *  Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *  Frame is expected to be already started (header already produced).
 * @return : compressed size, or an error code
 */
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       U32 lastFrameChunk)
{
    size_t blockSizeMax = cctx->blockSizeMax;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
    S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;

    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        xxh64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
        size_t const blockSize = ZSTD_optimalBlockSize(cctx,
                                    ip, remaining,
                                    blockSizeMax,
                                    cctx->appliedParams.preBlockSplitter_level,
                                    cctx->appliedParams.cParams.strategy,
                                    savings);
        U32 const lastBlock = lastFrameChunk & (blockSize == remaining);
        assert(blockSize <= remaining);
        /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate
         * we add an additional 1. This logic should be revisited and made more consistent. */
        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");

        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowLimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize;
            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
                assert(cSize > 0);
                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
            } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
                assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
            } else {
                cSize = ZSTD_compressBlock_internal(cctx,
                            op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                            ip, blockSize, 1 /* frame */);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");

                if (cSize == 0) {  /* block is not compressible */
                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
                } else {
                    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                    MEM_writeLE24(op, cBlockHeader);
                    cSize += ZSTD_blockHeaderSize;
                }
            }  /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) */
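            /* Worked example (illustrative): the 3-byte block header packs
             * lastBlock (bit 0), blockType (bits 1-2) and the block size
             * (bits 3-23). A non-last compressed block with cSize == 1000
             * yields 0 + (bt_compressed<<1) + (1000<<3) == 0x1F44,
             * written little-endian by MEM_writeLE24() above.
             */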
            /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data.
             * Without splitting, the maximum expansion is 3 bytes per full block.
             * An adversarial input could attempt to fudge the split detector,
             * and make it split incompressible data, resulting in more block headers.
             * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block,
             * and the splitter never creates blocks that small (current lower limit is 8 KB),
             * there is no risk of expanding beyond the ZSTD_COMPRESSBOUND() limit.
             * But if the goal is to not expand by more than 3 bytes per 128 KB full block,
             * then yes, it becomes possible to make the block splitter oversplit incompressible data.
             * Using @savings, we enforce an even more conservative condition,
             * requiring the presence of enough savings (at least 3 bytes) to authorize splitting;
             * otherwise only full blocks are used.
             * But being conservative is fine,
             * since splitting barely compressible blocks is not fruitful anyway. */
            savings += (S64)blockSize - (S64)cSize;

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            cctx->isFirstBlock = 0;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (unsigned)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op-ostart);
}


static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    const ZSTD_CCtx_params* params,
                                    U64 pledgedSrcSize, U32 dictID)
{
    BYTE* const op = (BYTE*)dst;
    U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32 const checksumFlag = params->fParams.checksumFlag>0;
    U32 const windowSize = (U32)1 << params->cParams.windowLog;
    U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32 const fcsCode = params->fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;   /* 0-3 */
    BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6));
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
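/* Worked example (illustrative): for a frame with checksum enabled, no
 * dictID, single-segment off and fcsCode == 2 (4-byte content size), the
 * frame header descriptor byte computed above is
 *     0 + (1<<2) + (0<<5) + (2<<6) == 0x84,
 * followed by the window descriptor byte and the 4-byte content size.
 */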
/* ZSTD_writeSkippableFrame() :
 * Writes out a skippable frame with the specified magic number variant (16 are supported),
 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
 *
 * Returns the total number of bytes written, or a ZSTD error code.
 */
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, unsigned magicVariant)
{
    BYTE* op = (BYTE*)dst;
    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
                    dstSize_tooSmall, "Not enough room for skippable frame");
    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");

    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
    MEM_writeLE32(op+4, (U32)srcSize);
    ZSTD_memcpy(op+8, src, srcSize);
    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (< ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
                    "dst buf is too small to write frame trailer empty block.");
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);   /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}

void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    assert(cctx->stage == ZSTDcs_init);
    assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    cctx->externSeqStore.posInSequence = 0;
}
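/* Usage sketch of the buffer-less streaming entry points implemented below
 * (illustrative; error handling and window management elided, see zstd.h;
 * `chunk`, `chunkSize` and the loop condition are caller-defined):
 *
 *     ZSTD_compressBegin(cctx, 3);
 *     while (has_more_chunks) {
 *         op += ZSTD_compressContinue(cctx, op, (size_t)(oend-op), chunk, chunkSize);
 *     }
 *     op += ZSTD_compressEnd(cctx, op, (size_t)(oend-op), lastChunk, lastChunkSize);
 */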
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            U32 frame, U32 lastFrameChunk)
{
    ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;   /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
        ms->forceNonContiguous = 0;
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams,
            src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax);
    {   size_t const cSize = frame ?
                ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {   /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            RETURN_ERROR_IF(
                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
                srcSize_wrong,
                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1,
                (unsigned)cctx->consumedSrcSize);
        }
        return cSize + fhSize;
    }
}

size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}

/* NOTE: Must just wrap ZSTD_compressContinue_public() */
size_t ZSTD_compressContinue(ZSTD_CCtx* cctx,
                             void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize);
}

static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx)
{
    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
    assert(!ZSTD_checkCParams(cParams));
    return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog);
}

/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
    return ZSTD_getBlockSize_deprecated(cctx);
}

size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
    {   size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx);
        RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block");   }

    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}

/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTD_loadDictionaryContent() :
 * @return : 0, or an error code
 */
static size_t
ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms,
                           ldmState_t* ls,
                           ZSTD_cwksp* ws,
                           ZSTD_CCtx_params const* params,
                           const void* src, size_t srcSize,
                           ZSTD_dictTableLoadMethod_e dtlm,
                           ZSTD_tableFillPurpose_e tfp)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    {   /* Ensure large dictionaries can't cause index overflow */

        /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
         * Dictionaries right at the edge will immediately trigger overflow
         * correction, but I don't want to insert extra constraints here.
         */
        U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;

        int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams);
        if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
            /* Some dictionary matchfinders in zstd use "short cache",
             * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
             * CDict hashtable entry as a tag rather than as part of an index.
             * When short cache is used, we need to truncate the dictionary
             * so that its indices don't overlap with the tag. */
            U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
            maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
            assert(!loadLdmDict);
        }

        /* If the dictionary is too large, only load the suffix of the dictionary. */
        if (srcSize > maxDictSize) {
            ip = iend - maxDictSize;
            src = ip;
            srcSize = maxDictSize;
        }
    }

    if (srcSize > ZSTD_CHUNKSIZE_MAX) {
        /* We must have cleared our windows when our source is this large. */
        assert(ZSTD_window_isEmpty(ms->window));
        if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window));
    }
    ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);

    DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder);

    if (loadLdmDict) {   /* Load the entire dict into LDM matchfinders. */
        DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict");
        ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
        ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
        DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes");
    }

    /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
    {   U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31);
        if (srcSize > maxDictSize) {
            ip = iend - maxDictSize;
            src = ip;
            srcSize = maxDictSize;
        }
    }

    ms->nextToUpdate = (U32)(ip - ms->window.base);
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
    ms->forceNonContiguous = params->deterministicRefPrefix;

    if (srcSize <= HASH_READ_SIZE) return 0;

    ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);

    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm, tfp);
        break;
    case ZSTD_dfast:
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
        ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
#else
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
#endif
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
        assert(srcSize >= HASH_READ_SIZE);
        if (ms->dedicatedDictSearch) {
            assert(ms->chainTable != NULL);
            ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
        } else {
            assert(params->useRowMatchFinder != ZSTD_ps_auto);
            if (params->useRowMatchFinder == ZSTD_ps_enable) {
                size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog);
                ZSTD_memset(ms->tagTable, 0, tagTableSize);
                ZSTD_row_update(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using row-based hash table for lazy dict");
            } else {
                ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using chain-based hash table for lazy dict");
            }
        }
#else
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
#endif
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
        assert(srcSize >= HASH_READ_SIZE);
        DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize);
        ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
#else
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
#endif
        break;

    default:
        assert(0);   /* not possible : not a valid strategy id */
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}

/* Dictionaries that assign zero probability to symbols that do show up
 * cause problems when FSE encoding. Mark dictionaries with zero-probability
 * symbols as FSE_repeat_check; only dictionaries with 100% valid symbols
 * can be assumed valid.
 */
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
    U32 s;
    if (dictMaxSymbolValue < maxSymbolValue) {
        return FSE_repeat_check;
    }
    for (s = 0; s <= maxSymbolValue; ++s) {
        if (normalizedCounter[s] == 0) {
            return FSE_repeat_check;
        }
    }
    return FSE_repeat_valid;
}

size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
                                                    (size_t)(dictEnd-dictPtr), &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights. Otherwise, we set it to check */
        if (!hasZeroWeights && maxSymbolValue == 255)
            bs->entropy.huf.repeatMode = HUF_repeat_valid;

        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
        dictPtr += hufHeaderSize;
    }

    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
        dictPtr += matchlengthHeaderSize;
    }
""); 5037 bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL); 5038 dictPtr += litlengthHeaderSize; 5039 } 5040 5041 RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); 5042 bs->rep[0] = MEM_readLE32(dictPtr+0); 5043 bs->rep[1] = MEM_readLE32(dictPtr+4); 5044 bs->rep[2] = MEM_readLE32(dictPtr+8); 5045 dictPtr += 12; 5046 5047 { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); 5048 U32 offcodeMax = MaxOff; 5049 if (dictContentSize <= ((U32)-1) - 128 KB) { 5050 U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ 5051 offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ 5052 } 5053 /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */ 5054 bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)); 5055 5056 /* All repCodes must be <= dictContentSize and != 0 */ 5057 { U32 u; 5058 for (u=0; u<3; u++) { 5059 RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, ""); 5060 RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); 5061 } } } 5062 5063 return (size_t)(dictPtr - (const BYTE*)dict); 5064 } 5065 5066 /* Dictionary format : 5067 * See : 5068 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format 5069 */ 5070 /*! ZSTD_loadZstdDictionary() : 5071 * @return : dictID, or an error code 5072 * assumptions : magic number supposed already checked 5073 * dictSize supposed >= 8 5074 */ 5075 static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, 5076 ZSTD_MatchState_t* ms, 5077 ZSTD_cwksp* ws, 5078 ZSTD_CCtx_params const* params, 5079 const void* dict, size_t dictSize, 5080 ZSTD_dictTableLoadMethod_e dtlm, 5081 ZSTD_tableFillPurpose_e tfp, 5082 void* workspace) 5083 { 5084 const BYTE* dictPtr = (const BYTE*)dict; 5085 const BYTE* const dictEnd = dictPtr + dictSize; 5086 size_t dictID; 5087 size_t eSize; 5088 ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); 5089 assert(dictSize >= 8); 5090 assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY); 5091 5092 dictID = params->fParams.noDictIDFlag ? 
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_MatchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      ZSTD_tableFillPurpose_e tfp,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    size_t dictID;
    size_t eSize;
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    {
        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), "");
    }
    return dictID;
}

/* ZSTD_compress_insertDictionary() :
 * @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_MatchState_t* ms,
                               ldmState_t* ls,
                               ZSTD_cwksp* ws,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               ZSTD_tableFillPurpose_e tfp,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    if ((dict==NULL) || (dictSize<8)) {
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        return 0;
    }

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(
                ms, ls, ws, params, dict, dictSize, dtlm, tfp);
        }
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return ZSTD_loadZstdDictionary(
        bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace);
}

#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
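/* Worked example (illustrative) of the cutoff rule used below: with a 110 KB
 * dictionary, the CDict's own parameters are attached whenever the pledged
 * source size is below 6 * 110 KB == 660 KB (or below the 128 KB cutoff, or
 * unknown); larger pledged sizes recompute parameters from the compression
 * level instead, provided the CDict carries one.
 */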
/*! ZSTD_compressBegin_internal() :
 * Assumption : either @dict OR @cdict (or none) is non-NULL, never both
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                    ZSTD_buffered_policy_e zbuff)
{
    size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));   /* either dict or cdict, not both */
    if ( (cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                              dictContentSize,
                                              ZSTDcrp_makeClean, zbuff) , "");
    {   size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        ZSTD_tfp_forCCtx, cctx->tmpWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
        cctx->dictContentSize = dictContentSize;
    }
    return 0;
}

size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params,
                                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
    /* compression parameters verification and optimization */
    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, dictContentType, dtlm,
                                       cdict,
                                       params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/*! ZSTD_compressBegin_advanced() :
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compressBegin_advanced_internal(cctx,
                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            NULL /*cdict*/,
                                            &cctxParams, pledgedSrcSize);
}

static size_t
ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_CCtx_params cctxParams;
    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}

size_t
ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel);
}

size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel);
}
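/* Frame epilogue layout (illustrative), as produced by ZSTD_writeEpilogue()
 * below: a final empty raw block whose 3-byte header has value 0x000001
 * (lastBlock=1, bt_raw, size 0), optionally followed by a 4-byte
 * little-endian checksum (low 32 bits of XXH64) when checksumFlag is set.
 */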
/*! ZSTD_writeEpilogue() :
 *  Ends a frame.
 * @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3);
        RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue");
        MEM_writeLE24(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;   /* return to "created but no init" status */
    return (size_t)(op-ostart);
}

void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
    (void)cctx;
    (void)extraCSize;
}

size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {   /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
            "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    ZSTD_CCtx_trace(cctx, endResult);
    return cSize + endResult;
}

/* NOTE: Must just wrap ZSTD_compressEnd_public() */
size_t ZSTD_compressEnd(ZSTD_CCtx* cctx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
{
    return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}
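/* Note (illustrative): the pledged-size contract is exact. A frame begun with
 * pledgedSrcSize == 100 must consume exactly 100 bytes in total across
 * ZSTD_compressContinue() calls plus the final ZSTD_compressEnd() call;
 * any other total fails with srcSize_wrong, as checked above.
 */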
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize,
                               ZSTD_parameters params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced");
    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
    ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctx->simpleApiParams);
}

/* Internal */
size_t ZSTD_compress_advanced_internal(
        ZSTD_CCtx* cctx,
        void* dst, size_t dstCapacity,
        const void* src, size_t srcSize,
        const void* dict, size_t dictSize,
        const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                         params, srcSize, ZSTDb_not_buffered) , "");
    return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}

size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize,
                               int compressionLevel)
{
    {
        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
        assert(params.fParams.contentSizeFlag == 1);
        ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
}

size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
    assert(cctx != NULL);
    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}

size_t ZSTD_compress(void* dst, size_t dstCapacity,
                     const void* src, size_t srcSize,
                     int compressionLevel)
{
    size_t result;
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtx(cctx);
    return result;
}
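/* Usage sketch for the one-shot entry points above (illustrative; assumes a
 * heap-allocated destination buffer and elides error handling):
 *
 *     size_t const bound = ZSTD_compressBound(srcSize);
 *     void* const dst = malloc(bound);
 *     size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
 *     if (ZSTD_isError(cSize)) { ... }
 */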
/* =====  Dictionary API  ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with the following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
         /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
          * in case we are using DDS with row-hash. */
         + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
                                  /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}

size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
    /* cdict may be in the workspace */
    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
         + ZSTD_cwksp_sizeof(&cdict->workspace);
}
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictContent = dictBuffer;
    } else {
        void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;

    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        params.useRowMatchFinder,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}

static ZSTD_CDict*
ZSTD_createCDict_advanced_internal(size_t dictSize,
                                ZSTD_dictLoadMethod_e dictLoadMethod,
                                ZSTD_compressionParameters cParams,
                                ZSTD_ParamSwitch_e useRowMatchFinder,
                                int enableDedicatedDictSearch,
                                ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize);

    {   size_t const workspaceSize =
            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
            ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
            (dictLoadMethod == ZSTD_dlm_byRef ? 0
             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
        ZSTD_cwksp ws;
        ZSTD_CDict* cdict;

        if (!workspace) {
            ZSTD_customFree(workspace, customMem);
            return NULL;
        }

        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);

        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        assert(cdict != NULL);
        ZSTD_cwksp_move(&cdict->workspace, &ws);
        cdict->customMem = customMem;
        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
        cdict->useRowMatchFinder = useRowMatchFinder;
        return cdict;
    }
}

ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
    DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
    ZSTD_CCtxParams_init(&cctxParams, 0);
    cctxParams.cParams = cParams;
    cctxParams.customMem = customMem;
    return ZSTD_createCDict_advanced2(
        dictBuffer, dictSize,
        dictLoadMethod, dictContentType,
        &cctxParams, customMem);
}
ZSTD_CDict* ZSTD_createCDict_advanced2(
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        const ZSTD_CCtx_params* originalCctxParams,
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams = *originalCctxParams;
    ZSTD_compressionParameters cParams;
    ZSTD_CDict* cdict;

    DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    if (cctxParams.enableDedicatedDictSearch) {
        cParams = ZSTD_dedicatedDictSearch_getCParams(
            cctxParams.compressionLevel, dictSize);
        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
    } else {
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
        /* Fall back to non-DDSS params */
        cctxParams.enableDedicatedDictSearch = 0;
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch);
    cctxParams.cParams = cParams;
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);

    cdict = ZSTD_createCDict_advanced_internal(dictSize,
                        dictLoadMethod, cctxParams.cParams,
                        cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
                        customMem);

    if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                    dict, dictSize,
                                    dictLoadMethod, dictContentType,
                                    cctxParams) )) {
        ZSTD_freeCDict(cdict);
        return NULL;
    }

    return cdict;
}

ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                        ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                                        cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                        ZSTD_dlm_byRef, ZSTD_dct_auto,
                                                        cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = cdict->customMem;
        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
        ZSTD_cwksp_free(&cdict->workspace, cMem);
        if (!cdictInWorkspace) {
            ZSTD_customFree(cdict, cMem);
        }
        return 0;
    }
}
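/* Usage sketch (illustrative, error handling elided): the typical CDict
 * lifecycle when one dictionary serves many compression jobs:
 *
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
 *     // for each job:
 *     cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
 *                                      src, srcSize, cdict);
 *     ZSTD_freeCDict(cdict);
 */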
5643 */ 5644 const ZSTD_CDict* ZSTD_initStaticCDict( 5645 void* workspace, size_t workspaceSize, 5646 const void* dict, size_t dictSize, 5647 ZSTD_dictLoadMethod_e dictLoadMethod, 5648 ZSTD_dictContentType_e dictContentType, 5649 ZSTD_compressionParameters cParams) 5650 { 5651 ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); 5652 /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ 5653 size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); 5654 size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) 5655 + (dictLoadMethod == ZSTD_dlm_byRef ? 0 5656 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))) 5657 + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) 5658 + matchStateSize; 5659 ZSTD_CDict* cdict; 5660 ZSTD_CCtx_params params; 5661 5662 DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize); 5663 if ((size_t)workspace & 7) return NULL; /* 8-aligned */ 5664 5665 { 5666 ZSTD_cwksp ws; 5667 ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); 5668 cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); 5669 if (cdict == NULL) return NULL; 5670 ZSTD_cwksp_move(&cdict->workspace, &ws); 5671 } 5672 5673 if (workspaceSize < neededSize) return NULL; 5674 5675 ZSTD_CCtxParams_init(&params, 0); 5676 params.cParams = cParams; 5677 params.useRowMatchFinder = useRowMatchFinder; 5678 cdict->useRowMatchFinder = useRowMatchFinder; 5679 cdict->compressionLevel = ZSTD_NO_CLEVEL; 5680 5681 if (ZSTD_isError( ZSTD_initCDict_internal(cdict, 5682 dict, dictSize, 5683 dictLoadMethod, dictContentType, 5684 params) )) 5685 return NULL; 5686 5687 return cdict; 5688 } 5689 5690 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) 5691 { 5692 assert(cdict != NULL); 5693 return cdict->matchState.cParams; 5694 } 5695 5696 /*! ZSTD_getDictID_fromCDict() : 5697 * Provides the dictID of the dictionary loaded into `cdict`. 5698 * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. 5699 * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ 5700 unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict) 5701 { 5702 if (cdict==NULL) return 0; 5703 return cdict->dictID; 5704 } 5705 5706 /* ZSTD_compressBegin_usingCDict_internal() : 5707 * Implementation of various ZSTD_compressBegin_usingCDict* functions. 5708 */ 5709 static size_t ZSTD_compressBegin_usingCDict_internal( 5710 ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, 5711 ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) 5712 { 5713 ZSTD_CCtx_params cctxParams; 5714 DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal"); 5715 RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!"); 5716 /* Initialize the cctxParams from the cdict */ 5717 { 5718 ZSTD_parameters params; 5719 params.fParams = fParams; 5720 params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF 5721 || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER 5722 || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN 5723 || cdict->compressionLevel == 0 ) ?
5724 ZSTD_getCParamsFromCDict(cdict) 5725 : ZSTD_getCParams(cdict->compressionLevel, 5726 pledgedSrcSize, 5727 cdict->dictContentSize); 5728 ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, cdict->compressionLevel); 5729 } 5730 /* Increase window log to fit the entire dictionary and source if the 5731 * source size is known. Limit the increase to 19, which is the 5732 * window log for compression level 1 with the largest source size. 5733 */ 5734 if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { 5735 U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); 5736 U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; 5737 cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog); 5738 } 5739 return ZSTD_compressBegin_internal(cctx, 5740 NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, 5741 cdict, 5742 &cctxParams, pledgedSrcSize, 5743 ZSTDb_not_buffered); 5744 } 5745 5746 5747 /* ZSTD_compressBegin_usingCDict_advanced() : 5748 * This function is DEPRECATED. 5749 * cdict must be != NULL */ 5750 size_t ZSTD_compressBegin_usingCDict_advanced( 5751 ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, 5752 ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) 5753 { 5754 return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); 5755 } 5756 5757 /* ZSTD_compressBegin_usingCDict() : 5758 * cdict must be != NULL */ 5759 size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) 5760 { 5761 ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; 5762 return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); 5763 } 5764 5765 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) 5766 { 5767 return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); 5768 } 5769 5770 /*! ZSTD_compress_usingCDict_internal(): 5771 * Implementation of various ZSTD_compress_usingCDict* functions. 5772 */ 5773 static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, 5774 void* dst, size_t dstCapacity, 5775 const void* src, size_t srcSize, 5776 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) 5777 { 5778 FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ 5779 return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); 5780 } 5781 5782 /*! ZSTD_compress_usingCDict_advanced(): 5783 * This function is DEPRECATED. 5784 */ 5785 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, 5786 void* dst, size_t dstCapacity, 5787 const void* src, size_t srcSize, 5788 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) 5789 { 5790 return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); 5791 } 5792 5793 /*! ZSTD_compress_usingCDict() : 5794 * Compression using a digested Dictionary. 5795 * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
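 * Minimal usage sketch (illustrative : cctx, buffers and the level are assumed provided, error handling elided) :
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
 * Check ZSTD_isError(cSize), reuse cdict for further frames, then ZSTD_freeCDict(cdict).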
5796 * Note that compression parameters are decided at CDict creation time 5797 * while frame parameters are hardcoded */ 5798 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, 5799 void* dst, size_t dstCapacity, 5800 const void* src, size_t srcSize, 5801 const ZSTD_CDict* cdict) 5802 { 5803 ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; 5804 return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); 5805 } 5806 5807 5808 5809 /* ****************************************************************** 5810 * Streaming 5811 ********************************************************************/ 5812 5813 ZSTD_CStream* ZSTD_createCStream(void) 5814 { 5815 DEBUGLOG(3, "ZSTD_createCStream"); 5816 return ZSTD_createCStream_advanced(ZSTD_defaultCMem); 5817 } 5818 5819 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) 5820 { 5821 return ZSTD_initStaticCCtx(workspace, workspaceSize); 5822 } 5823 5824 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) 5825 { /* CStream and CCtx are now same object */ 5826 return ZSTD_createCCtx_advanced(customMem); 5827 } 5828 5829 size_t ZSTD_freeCStream(ZSTD_CStream* zcs) 5830 { 5831 return ZSTD_freeCCtx(zcs); /* same object */ 5832 } 5833 5834 5835 5836 /*====== Initialization ======*/ 5837 5838 size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } 5839 5840 size_t ZSTD_CStreamOutSize(void) 5841 { 5842 return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; 5843 } 5844 5845 static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) 5846 { 5847 if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) 5848 return ZSTD_cpm_attachDict; 5849 else 5850 return ZSTD_cpm_noAttachDict; 5851 } 5852 5853 /* ZSTD_resetCStream(): 5854 * pledgedSrcSize == 0 means "unknown" */ 5855 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) 5856 { 5857 /* temporary : 0 interpreted as "unknown" during transition period. 5858 * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. 5859 * 0 will be interpreted as "empty" in the future. 5860 */ 5861 U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; 5862 DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); 5863 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5864 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); 5865 return 0; 5866 } 5867 5868 /*! ZSTD_initCStream_internal() : 5869 * Note : for lib/compress only. Used by zstdmt_compress.c. 
5870 * Assumption 1 : params are valid 5871 * Assumption 2 : either dict, or cdict, is defined, not both */ 5872 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, 5873 const void* dict, size_t dictSize, const ZSTD_CDict* cdict, 5874 const ZSTD_CCtx_params* params, 5875 unsigned long long pledgedSrcSize) 5876 { 5877 DEBUGLOG(4, "ZSTD_initCStream_internal"); 5878 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5879 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); 5880 assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); 5881 zcs->requestedParams = *params; 5882 assert(!((dict) && (cdict))); /* either dict or cdict, not both */ 5883 if (dict) { 5884 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); 5885 } else { 5886 /* Dictionary is cleared if !cdict */ 5887 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); 5888 } 5889 return 0; 5890 } 5891 5892 /* ZSTD_initCStream_usingCDict_advanced() : 5893 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ 5894 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, 5895 const ZSTD_CDict* cdict, 5896 ZSTD_frameParameters fParams, 5897 unsigned long long pledgedSrcSize) 5898 { 5899 DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); 5900 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5901 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); 5902 zcs->requestedParams.fParams = fParams; 5903 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); 5904 return 0; 5905 } 5906 5907 /* note : cdict must outlive compression session */ 5908 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) 5909 { 5910 DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); 5911 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5912 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); 5913 return 0; 5914 } 5915 5916 5917 /* ZSTD_initCStream_advanced() : 5918 * pledgedSrcSize must be exact. 5919 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. 5920 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ 5921 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, 5922 const void* dict, size_t dictSize, 5923 ZSTD_parameters params, unsigned long long pss) 5924 { 5925 /* for compatibility with older programs relying on this behavior. 5926 * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. 5927 * This line will be removed in the future. 5928 */ 5929 U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? 
ZSTD_CONTENTSIZE_UNKNOWN : pss; 5930 DEBUGLOG(4, "ZSTD_initCStream_advanced"); 5931 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5932 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); 5933 FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); 5934 ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params); 5935 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); 5936 return 0; 5937 } 5938 5939 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) 5940 { 5941 DEBUGLOG(4, "ZSTD_initCStream_usingDict"); 5942 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5943 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); 5944 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); 5945 return 0; 5946 } 5947 5948 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) 5949 { 5950 /* temporary : 0 interpreted as "unknown" during transition period. 5951 * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. 5952 * 0 will be interpreted as "empty" in the future. 5953 */ 5954 U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; 5955 DEBUGLOG(4, "ZSTD_initCStream_srcSize"); 5956 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5957 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); 5958 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); 5959 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); 5960 return 0; 5961 } 5962 5963 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) 5964 { 5965 DEBUGLOG(4, "ZSTD_initCStream"); 5966 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); 5967 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); 5968 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); 5969 return 0; 5970 } 5971 5972 /*====== Compression ======*/ 5973 5974 static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) 5975 { 5976 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { 5977 return cctx->blockSizeMax - cctx->stableIn_notConsumed; 5978 } 5979 assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); 5980 { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; 5981 if (hintInSize==0) hintInSize = cctx->blockSizeMax; 5982 return hintInSize; 5983 } 5984 } 5985 5986 /* ZSTD_compressStream_generic(): 5987 * internal function for all *compressStream*() variants 5988 * @return : hint size for next input to complete ongoing block */ 5989 static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, 5990 ZSTD_outBuffer* output, 5991 ZSTD_inBuffer* input, 5992 ZSTD_EndDirective const flushMode) 5993 { 5994 const char* const istart = (assert(input != NULL), (const char*)input->src); 5995 const char* const iend = (istart != NULL) ? istart + input->size : istart; 5996 const char* ip = (istart != NULL) ? istart + input->pos : istart; 5997 char* const ostart = (assert(output != NULL), (char*)output->dst); 5998 char* const oend = (ostart != NULL) ? ostart + output->size : ostart; 5999 char* op = (ostart != NULL) ?
ostart + output->pos : ostart; 6000 U32 someMoreWork = 1; 6001 6002 /* check expectations */ 6003 DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); 6004 assert(zcs != NULL); 6005 if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { 6006 assert(input->pos >= zcs->stableIn_notConsumed); 6007 input->pos -= zcs->stableIn_notConsumed; 6008 if (ip) ip -= zcs->stableIn_notConsumed; 6009 zcs->stableIn_notConsumed = 0; 6010 } 6011 if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { 6012 assert(zcs->inBuff != NULL); 6013 assert(zcs->inBuffSize > 0); 6014 } 6015 if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) { 6016 assert(zcs->outBuff != NULL); 6017 assert(zcs->outBuffSize > 0); 6018 } 6019 if (input->src == NULL) assert(input->size == 0); 6020 assert(input->pos <= input->size); 6021 if (output->dst == NULL) assert(output->size == 0); 6022 assert(output->pos <= output->size); 6023 assert((U32)flushMode <= (U32)ZSTD_e_end); 6024 6025 while (someMoreWork) { 6026 switch(zcs->streamStage) 6027 { 6028 case zcss_init: 6029 RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); 6030 6031 case zcss_load: 6032 if ( (flushMode == ZSTD_e_end) 6033 && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */ 6034 || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ 6035 && (zcs->inBuffPos == 0) ) { 6036 /* shortcut to compression pass directly into output buffer */ 6037 size_t const cSize = ZSTD_compressEnd_public(zcs, 6038 op, (size_t)(oend-op), 6039 ip, (size_t)(iend-ip)); 6040 DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); 6041 FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); 6042 ip = iend; 6043 op += cSize; 6044 zcs->frameEnded = 1; 6045 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); 6046 someMoreWork = 0; break; 6047 } 6048 /* complete loading into inBuffer in buffered mode */ 6049 if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { 6050 size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; 6051 size_t const loaded = ZSTD_limitCopy( 6052 zcs->inBuff + zcs->inBuffPos, toLoad, 6053 ip, (size_t)(iend-ip)); 6054 zcs->inBuffPos += loaded; 6055 if (ip) ip += loaded; 6056 if ( (flushMode == ZSTD_e_continue) 6057 && (zcs->inBuffPos < zcs->inBuffTarget) ) { 6058 /* not enough input to fill full block : stop here */ 6059 someMoreWork = 0; break; 6060 } 6061 if ( (flushMode == ZSTD_e_flush) 6062 && (zcs->inBuffPos == zcs->inToCompress) ) { 6063 /* empty */ 6064 someMoreWork = 0; break; 6065 } 6066 } else { 6067 assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); 6068 if ( (flushMode == ZSTD_e_continue) 6069 && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) { 6070 /* can't compress a full block : stop here */ 6071 zcs->stableIn_notConsumed = (size_t)(iend - ip); 6072 ip = iend; /* pretend to have consumed input */ 6073 someMoreWork = 0; break; 6074 } 6075 if ( (flushMode == ZSTD_e_flush) 6076 && (ip == iend) ) { 6077 /* empty */ 6078 someMoreWork = 0; break; 6079 } 6080 } 6081 /* compress current block (note : this stage cannot be stopped in the middle) */ 6082 DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); 6083 { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); 6084 void* cDst; 6085 size_t cSize; 6086 size_t oSize = (size_t)(oend-op); 6087 size_t const iSize = inputBuffered ? 
zcs->inBuffPos - zcs->inToCompress 6088 : MIN((size_t)(iend - ip), zcs->blockSizeMax); 6089 if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) 6090 cDst = op; /* compress into output buffer, to skip flush stage */ 6091 else 6092 cDst = zcs->outBuff, oSize = zcs->outBuffSize; 6093 if (inputBuffered) { 6094 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); 6095 cSize = lastBlock ? 6096 ZSTD_compressEnd_public(zcs, cDst, oSize, 6097 zcs->inBuff + zcs->inToCompress, iSize) : 6098 ZSTD_compressContinue_public(zcs, cDst, oSize, 6099 zcs->inBuff + zcs->inToCompress, iSize); 6100 FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); 6101 zcs->frameEnded = lastBlock; 6102 /* prepare next block */ 6103 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; 6104 if (zcs->inBuffTarget > zcs->inBuffSize) 6105 zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax; 6106 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", 6107 (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); 6108 if (!lastBlock) 6109 assert(zcs->inBuffTarget <= zcs->inBuffSize); 6110 zcs->inToCompress = zcs->inBuffPos; 6111 } else { /* !inputBuffered, hence ZSTD_bm_stable */ 6112 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); 6113 cSize = lastBlock ? 6114 ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : 6115 ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); 6116 /* Consume the input prior to error checking to mirror buffered mode. */ 6117 if (ip) ip += iSize; 6118 FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); 6119 zcs->frameEnded = lastBlock; 6120 if (lastBlock) assert(ip == iend); 6121 } 6122 if (cDst == op) { /* no need to flush */ 6123 op += cSize; 6124 if (zcs->frameEnded) { 6125 DEBUGLOG(5, "Frame completed directly in outBuffer"); 6126 someMoreWork = 0; 6127 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); 6128 } 6129 break; 6130 } 6131 zcs->outBuffContentSize = cSize; 6132 zcs->outBuffFlushedSize = 0; 6133 zcs->streamStage = zcss_flush; /* pass-through to flush stage */ 6134 } 6135 ZSTD_FALLTHROUGH; 6136 case zcss_flush: 6137 DEBUGLOG(5, "flush stage"); 6138 assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); 6139 { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; 6140 size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), 6141 zcs->outBuff + zcs->outBuffFlushedSize, toFlush); 6142 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", 6143 (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); 6144 if (flushed) 6145 op += flushed; 6146 zcs->outBuffFlushedSize += flushed; 6147 if (toFlush!=flushed) { 6148 /* flush not fully completed, presumably because dst is too small */ 6149 assert(op==oend); 6150 someMoreWork = 0; 6151 break; 6152 } 6153 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; 6154 if (zcs->frameEnded) { 6155 DEBUGLOG(5, "Frame completed on flush"); 6156 someMoreWork = 0; 6157 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); 6158 break; 6159 } 6160 zcs->streamStage = zcss_load; 6161 break; 6162 } 6163 6164 default: /* impossible */ 6165 assert(0); 6166 } 6167 } 6168 6169 input->pos = (size_t)(ip - istart); 6170 output->pos = (size_t)(op - ostart); 6171 if (zcs->frameEnded) return 0; 6172 return ZSTD_nextInputSizeHint(zcs); 6173 } 6174 6175 static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) 6176 { 6177 return ZSTD_nextInputSizeHint(cctx); 6178 
6179 } 6180 6181 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) 6182 { 6183 FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); 6184 return ZSTD_nextInputSizeHint_MTorST(zcs); 6185 } 6186 6187 /* After a compression call set the expected input/output buffer. 6188 * This is validated at the start of the next compression call. 6189 */ 6190 static void 6191 ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input) 6192 { 6193 DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)"); 6194 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { 6195 cctx->expectedInBuffer = *input; 6196 } 6197 if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { 6198 cctx->expectedOutBufferSize = output->size - output->pos; 6199 } 6200 } 6201 6202 /* Validate that the input/output buffers match the expectations set by 6203 * ZSTD_setBufferExpectations. 6204 */ 6205 static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, 6206 ZSTD_outBuffer const* output, 6207 ZSTD_inBuffer const* input, 6208 ZSTD_EndDirective endOp) 6209 { 6210 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { 6211 ZSTD_inBuffer const expect = cctx->expectedInBuffer; 6212 if (expect.src != input->src || expect.pos != input->pos) 6213 RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!"); 6214 } 6215 (void)endOp; 6216 if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { 6217 size_t const outBufferSize = output->size - output->pos; 6218 if (cctx->expectedOutBufferSize != outBufferSize) 6219 RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!"); 6220 } 6221 return 0; 6222 } 6223 6224 /* 6225 * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. 6226 * Otherwise, it's ignored. 6227 * @return: 0 on success, or a ZSTD_error code otherwise. 6228 */ 6229 static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, 6230 ZSTD_EndDirective endOp, 6231 size_t inSize) 6232 { 6233 ZSTD_CCtx_params params = cctx->requestedParams; 6234 ZSTD_prefixDict const prefixDict = cctx->prefixDict; 6235 FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ 6236 ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ 6237 assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ 6238 if (cctx->cdict && !cctx->localDict.cdict) { 6239 /* Let the cdict's compression level take priority over the requested params. 6240 * But do not take the cdict's compression level if the "cdict" is actually a localDict 6241 * generated from ZSTD_initLocalDict(). 6242 */ 6243 params.compressionLevel = cctx->cdict->compressionLevel; 6244 } 6245 DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage"); 6246 if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ 6247 6248 { size_t const dictSize = prefixDict.dict 6249 ? prefixDict.dictSize 6250 : (cctx->cdict ? 
cctx->cdict->dictContentSize : 0); 6251 ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1); 6252 params.cParams = ZSTD_getCParamsFromCCtxParams( 6253 &params, cctx->pledgedSrcSizePlusOne-1, 6254 dictSize, mode); 6255 } 6256 6257 params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, &params.cParams); 6258 params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams); 6259 params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams); 6260 params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); 6261 params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); 6262 params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); 6263 6264 { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; 6265 assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); 6266 FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, 6267 prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast, 6268 cctx->cdict, 6269 &params, pledgedSrcSize, 6270 ZSTDb_buffered) , ""); 6271 assert(cctx->appliedParams.nbWorkers == 0); 6272 cctx->inToCompress = 0; 6273 cctx->inBuffPos = 0; 6274 if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) { 6275 /* for small input: avoid automatic flush on reaching end of block, since 6276 * it would require to add a 3-bytes null block to end frame 6277 */ 6278 cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize); 6279 } else { 6280 cctx->inBuffTarget = 0; 6281 } 6282 cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; 6283 cctx->streamStage = zcss_load; 6284 cctx->frameEnded = 0; 6285 } 6286 return 0; 6287 } 6288 6289 /* @return provides a minimum amount of data remaining to be flushed from internal buffers 6290 */ 6291 size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, 6292 ZSTD_outBuffer* output, 6293 ZSTD_inBuffer* input, 6294 ZSTD_EndDirective endOp) 6295 { 6296 DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); 6297 /* check conditions */ 6298 RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer"); 6299 RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer"); 6300 RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective"); 6301 assert(cctx != NULL); 6302 6303 /* transparent initialization stage */ 6304 if (cctx->streamStage == zcss_init) { 6305 size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */ 6306 size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; 6307 if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ 6308 && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */ 6309 && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */ 6310 if (cctx->stableIn_notConsumed) { /* not the first time */ 6311 /* check stable source guarantees */ 6312 RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); 6313 RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); 6314 } 6315 /* pretend input was consumed, to give a sense of forward progress */
6316 input->pos = input->size; 6317 /* save stable inBuffer, for later control, and flush/end */ 6318 cctx->expectedInBuffer = *input; 6319 /* but actually input wasn't consumed, so keep track of position from where compression shall resume */ 6320 cctx->stableIn_notConsumed += inputSize; 6321 /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ 6322 return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */ 6323 } 6324 FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed"); 6325 ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ 6326 } 6327 /* end of transparent initialization stage */ 6328 6329 FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers"); 6330 /* compression stage */ 6331 FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); 6332 DEBUGLOG(5, "completed ZSTD_compressStream2"); 6333 ZSTD_setBufferExpectations(cctx, output, input); 6334 return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ 6335 } 6336 6337 size_t ZSTD_compressStream2_simpleArgs ( 6338 ZSTD_CCtx* cctx, 6339 void* dst, size_t dstCapacity, size_t* dstPos, 6340 const void* src, size_t srcSize, size_t* srcPos, 6341 ZSTD_EndDirective endOp) 6342 { 6343 ZSTD_outBuffer output; 6344 ZSTD_inBuffer input; 6345 output.dst = dst; 6346 output.size = dstCapacity; 6347 output.pos = *dstPos; 6348 input.src = src; 6349 input.size = srcSize; 6350 input.pos = *srcPos; 6351 /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ 6352 { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); 6353 *dstPos = output.pos; 6354 *srcPos = input.pos; 6355 return cErr; 6356 } 6357 } 6358 6359 size_t ZSTD_compress2(ZSTD_CCtx* cctx, 6360 void* dst, size_t dstCapacity, 6361 const void* src, size_t srcSize) 6362 { 6363 ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode; 6364 ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode; 6365 DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); 6366 ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); 6367 /* Enable stable input/output buffers. */ 6368 cctx->requestedParams.inBufferMode = ZSTD_bm_stable; 6369 cctx->requestedParams.outBufferMode = ZSTD_bm_stable; 6370 { size_t oPos = 0; 6371 size_t iPos = 0; 6372 size_t const result = ZSTD_compressStream2_simpleArgs(cctx, 6373 dst, dstCapacity, &oPos, 6374 src, srcSize, &iPos, 6375 ZSTD_e_end); 6376 /* Reset to the original values. 
*/ 6377 cctx->requestedParams.inBufferMode = originalInBufferMode; 6378 cctx->requestedParams.outBufferMode = originalOutBufferMode; 6379 6380 FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); 6381 if (result != 0) { /* compression not completed, due to lack of output space */ 6382 assert(oPos == dstCapacity); 6383 RETURN_ERROR(dstSize_tooSmall, ""); 6384 } 6385 assert(iPos == srcSize); /* all input is expected to be consumed */ 6386 return oPos; 6387 } 6388 } 6389 6390 /* ZSTD_validateSequence() : 6391 * @offBase : must use the format required by ZSTD_storeSeq() 6392 * @returns a ZSTD error code if sequence is not valid 6393 */ 6394 static size_t 6395 ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch, 6396 size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) 6397 { 6398 U32 const windowSize = 1u << windowLog; 6399 /* posInSrc represents the amount of data the decoder would decode up to this point. 6400 * As long as the amount of data decoded is less than or equal to window size, offsets may be 6401 * larger than the total length of output decoded in order to reference the dict, even larger than 6402 * window size. After output surpasses windowSize, we're limited to windowSize offsets again. 6403 */ 6404 size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; 6405 size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; 6406 RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); 6407 /* Validate that matchLength is large enough for the given minMatch */ 6408 RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); 6409 return 0; 6410 } 6411 6412 /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ 6413 static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) 6414 { 6415 U32 offBase = OFFSET_TO_OFFBASE(rawOffset); 6416 6417 if (!ll0 && rawOffset == rep[0]) { 6418 offBase = REPCODE1_TO_OFFBASE; 6419 } else if (rawOffset == rep[1]) { 6420 offBase = REPCODE_TO_OFFBASE(2 - ll0); 6421 } else if (rawOffset == rep[2]) { 6422 offBase = REPCODE_TO_OFFBASE(3 - ll0); 6423 } else if (ll0 && rawOffset == rep[0] - 1) { 6424 offBase = REPCODE3_TO_OFFBASE; 6425 } 6426 return offBase; 6427 } 6428 6429 /* This function scans through an array of ZSTD_Sequence, 6430 * storing the sequences it reads, until it reaches a block delimiter. 6431 * Note that the block delimiter includes the last literals of the block. 6432 * @blockSize must be == sum(sequence_lengths). 6433 * @returns @blockSize on success, and a ZSTD_error otherwise.
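 * Example (hypothetical values) : inSeqs = { {of=100, ml=20, ll=3}, {of=0, ml=0, ll=4} } :
 * the second entry is the block delimiter, carrying the block's last 4 literals,
 * and @blockSize must be 3+20+4 == 27.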
6434 */ 6435 static size_t 6436 ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, 6437 ZSTD_SequencePosition* seqPos, 6438 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, 6439 const void* src, size_t blockSize, 6440 ZSTD_ParamSwitch_e externalRepSearch) 6441 { 6442 U32 idx = seqPos->idx; 6443 U32 const startIdx = idx; 6444 BYTE const* ip = (BYTE const*)(src); 6445 const BYTE* const iend = ip + blockSize; 6446 Repcodes_t updatedRepcodes; 6447 U32 dictSize; 6448 6449 DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize); 6450 6451 if (cctx->cdict) { 6452 dictSize = (U32)cctx->cdict->dictContentSize; 6453 } else if (cctx->prefixDict.dict) { 6454 dictSize = (U32)cctx->prefixDict.dictSize; 6455 } else { 6456 dictSize = 0; 6457 } 6458 ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); 6459 for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { 6460 U32 const litLength = inSeqs[idx].litLength; 6461 U32 const matchLength = inSeqs[idx].matchLength; 6462 U32 offBase; 6463 6464 if (externalRepSearch == ZSTD_ps_disable) { 6465 offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); 6466 } else { 6467 U32 const ll0 = (litLength == 0); 6468 offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); 6469 ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); 6470 } 6471 6472 DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); 6473 if (cctx->appliedParams.validateSequences) { 6474 seqPos->posInSrc += litLength + matchLength; 6475 FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, 6476 seqPos->posInSrc, 6477 cctx->appliedParams.cParams.windowLog, dictSize, 6478 ZSTD_hasExtSeqProd(&cctx->appliedParams)), 6479 "Sequence validation failed"); 6480 } 6481 RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, 6482 "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); 6483 ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); 6484 ip += matchLength + litLength; 6485 } 6486 RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found."); 6487 6488 /* If we skipped repcode search while parsing, we need to update repcodes now */ 6489 assert(externalRepSearch != ZSTD_ps_auto); 6490 assert(idx >= startIdx); 6491 if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { 6492 U32* const rep = updatedRepcodes.rep; 6493 U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ 6494 6495 if (lastSeqIdx >= startIdx + 2) { 6496 rep[2] = inSeqs[lastSeqIdx - 2].offset; 6497 rep[1] = inSeqs[lastSeqIdx - 1].offset; 6498 rep[0] = inSeqs[lastSeqIdx].offset; 6499 } else if (lastSeqIdx == startIdx + 1) { 6500 rep[2] = rep[0]; 6501 rep[1] = inSeqs[lastSeqIdx - 1].offset; 6502 rep[0] = inSeqs[lastSeqIdx].offset; 6503 } else { 6504 assert(lastSeqIdx == startIdx); 6505 rep[2] = rep[1]; 6506 rep[1] = rep[0]; 6507 rep[0] = inSeqs[lastSeqIdx].offset; 6508 } 6509 } 6510 6511 ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); 6512 6513 if (inSeqs[idx].litLength) { 6514 DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); 6515 ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength); 6516 ip += inSeqs[idx].litLength; 6517 seqPos->posInSrc += inSeqs[idx].litLength; 6518 } 6519 RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); 6520 seqPos->idx = idx+1; 6521 return blockSize; 6522 } 6523 6524 /* 6525 * This function attempts to scan through @blockSize bytes in @src 6526 * represented by the sequences in @inSeqs, 6527 * storing any (partial) sequences. 6528 * 6529 * Occasionally, we may want to reduce the actual number of bytes consumed from @src 6530 * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. 6531 * 6532 * @returns the number of bytes consumed from @src, necessarily <= @blockSize. 6533 * Otherwise, it may return a ZSTD error if something went wrong. 
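 * Worked example (hypothetical numbers) : with @blockSize==100, minMatch==4,
 * and a remaining sequence {ll=1, ml=101}, a plain cut at position 100
 * would leave a 2-bytes second half of the match;
 * the split point is therefore moved back by 2, so this call consumes 98 bytes
 * and both halves of the match stay >= minMatch.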
6534 */ 6535 static size_t 6536 ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx, 6537 ZSTD_SequencePosition* seqPos, 6538 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, 6539 const void* src, size_t blockSize, 6540 ZSTD_ParamSwitch_e externalRepSearch) 6541 { 6542 U32 idx = seqPos->idx; 6543 U32 startPosInSequence = seqPos->posInSequence; 6544 U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; 6545 size_t dictSize; 6546 const BYTE* const istart = (const BYTE*)(src); 6547 const BYTE* ip = istart; 6548 const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ 6549 Repcodes_t updatedRepcodes; 6550 U32 bytesAdjustment = 0; 6551 U32 finalMatchSplit = 0; 6552 6553 /* TODO(embg) support fast parsing mode in noBlockDelim mode */ 6554 (void)externalRepSearch; 6555 6556 if (cctx->cdict) { 6557 dictSize = cctx->cdict->dictContentSize; 6558 } else if (cctx->prefixDict.dict) { 6559 dictSize = cctx->prefixDict.dictSize; 6560 } else { 6561 dictSize = 0; 6562 } 6563 DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); 6564 DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); 6565 ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); 6566 while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { 6567 const ZSTD_Sequence currSeq = inSeqs[idx]; 6568 U32 litLength = currSeq.litLength; 6569 U32 matchLength = currSeq.matchLength; 6570 U32 const rawOffset = currSeq.offset; 6571 U32 offBase; 6572 6573 /* Modify the sequence depending on where endPosInSequence lies */ 6574 if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { 6575 if (startPosInSequence >= litLength) { 6576 startPosInSequence -= litLength; 6577 litLength = 0; 6578 matchLength -= startPosInSequence; 6579 } else { 6580 litLength -= startPosInSequence; 6581 } 6582 /* Move to the next sequence */ 6583 endPosInSequence -= currSeq.litLength + currSeq.matchLength; 6584 startPosInSequence = 0; 6585 } else { 6586 /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence 6587 does not reach the end of the match. So, we have to split the sequence */ 6588 DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u", 6589 currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence); 6590 if (endPosInSequence > litLength) { 6591 U32 firstHalfMatchLength; 6592 litLength = startPosInSequence >= litLength ? 
0 : litLength - startPosInSequence; 6593 firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; 6594 if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) { 6595 /* Only ever split the match if it is larger than the block size */ 6596 U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence; 6597 if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { 6598 /* Move the endPosInSequence backward so that it creates a match of minMatch length */ 6599 endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; 6600 bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; 6601 firstHalfMatchLength -= bytesAdjustment; 6602 } 6603 matchLength = firstHalfMatchLength; 6604 /* Flag that we split the last match - after storing the sequence, exit the loop, 6605 but keep the value of endPosInSequence */ 6606 finalMatchSplit = 1; 6607 } else { 6608 /* Move the position in sequence backwards so that we don't split the match, and break to store 6609 * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence 6610 * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so 6611 * would cause the first half of the match to be too small 6612 */ 6613 bytesAdjustment = endPosInSequence - currSeq.litLength; 6614 endPosInSequence = currSeq.litLength; 6615 break; 6616 } 6617 } else { 6618 /* This sequence ends inside the literals, break to store the last literals */ 6619 break; 6620 } 6621 } 6622 /* Check if this offset can be represented with a repcode */ 6623 { U32 const ll0 = (litLength == 0); 6624 offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); 6625 ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); 6626 } 6627 6628 if (cctx->appliedParams.validateSequences) { 6629 seqPos->posInSrc += litLength + matchLength; 6630 FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, 6631 cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), 6632 "Sequence validation failed"); 6633 } 6634 DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); 6635 RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, 6636 "Not enough memory allocated.
Try adjusting ZSTD_c_minMatch."); 6637 ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); 6638 ip += matchLength + litLength; 6639 if (!finalMatchSplit) 6640 idx++; /* Next Sequence */ 6641 } 6642 DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); 6643 assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); 6644 seqPos->idx = idx; 6645 seqPos->posInSequence = endPosInSequence; 6646 ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); 6647 6648 iend -= bytesAdjustment; 6649 if (ip != iend) { 6650 /* Store any last literals */ 6651 U32 const lastLLSize = (U32)(iend - ip); 6652 assert(ip <= iend); 6653 DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); 6654 ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); 6655 seqPos->posInSrc += lastLLSize; 6656 } 6657 6658 return (size_t)(iend-istart); 6659 } 6660 6661 /* @seqPos represents a position within @inSeqs, 6662 * it is read and updated by this function, 6663 * once the goal to produce a block of size @blockSize is reached. 6664 * @return: nb of bytes consumed from @src, necessarily <= @blockSize. 6665 */ 6666 typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx, 6667 ZSTD_SequencePosition* seqPos, 6668 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, 6669 const void* src, size_t blockSize, 6670 ZSTD_ParamSwitch_e externalRepSearch); 6671 6672 static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode) 6673 { 6674 assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode)); 6675 if (mode == ZSTD_sf_explicitBlockDelimiters) { 6676 return ZSTD_transferSequences_wBlockDelim; 6677 } 6678 assert(mode == ZSTD_sf_noBlockDelimiters); 6679 return ZSTD_transferSequences_noDelim; 6680 } 6681 6682 /* Discover the size of next block by searching for the delimiter. 6683 * Note that a block delimiter **must** exist in this mode, 6684 * otherwise it's an input error. 
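 * Example : {of=300, ml=50, ll=5} followed by the delimiter {of=0, ml=0, ll=2}
 * describes a single block of 5+50+2 == 57 bytes, the delimiter carrying the trailing literals.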
6685 * The block size retrieved will be later compared to ensure it remains within bounds */ 6686 static size_t 6687 blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos) 6688 { 6689 int end = 0; 6690 size_t blockSize = 0; 6691 size_t spos = seqPos.idx; 6692 DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); 6693 assert(spos <= inSeqsSize); 6694 while (spos < inSeqsSize) { 6695 end = (inSeqs[spos].offset == 0); 6696 blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; 6697 if (end) { 6698 if (inSeqs[spos].matchLength != 0) 6699 RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); 6700 break; 6701 } 6702 spos++; 6703 } 6704 if (!end) 6705 RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); 6706 return blockSize; 6707 } 6708 6709 static size_t determine_blockSize(ZSTD_SequenceFormat_e mode, 6710 size_t blockSize, size_t remaining, 6711 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, 6712 ZSTD_SequencePosition seqPos) 6713 { 6714 DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); 6715 if (mode == ZSTD_sf_noBlockDelimiters) { 6716 /* Note: more a "target" block size */ 6717 return MIN(remaining, blockSize); 6718 } 6719 assert(mode == ZSTD_sf_explicitBlockDelimiters); 6720 { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); 6721 FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); 6722 if (explicitBlockSize > blockSize) 6723 RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); 6724 if (explicitBlockSize > remaining) 6725 RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); 6726 return explicitBlockSize; 6727 } 6728 } 6729 6730 /* Compress all provided sequences, block-by-block. 6731 * 6732 * Returns the cumulative size of all compressed blocks (including their headers), 6733 * otherwise a ZSTD error. 
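 * Note : each emitted block is preceded by a 3-bytes block header
 * (last-block flag, block type, block size), written via MEM_writeLE24(),
 * so the returned total includes ZSTD_blockHeaderSize bytes per block.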
6734 */ 6735 static size_t 6736 ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, 6737 void* dst, size_t dstCapacity, 6738 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, 6739 const void* src, size_t srcSize) 6740 { 6741 size_t cSize = 0; 6742 size_t remaining = srcSize; 6743 ZSTD_SequencePosition seqPos = {0, 0, 0}; 6744 6745 const BYTE* ip = (BYTE const*)src; 6746 BYTE* op = (BYTE*)dst; 6747 ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); 6748 6749 DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); 6750 /* Special case: empty frame */ 6751 if (remaining == 0) { 6752 U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); 6753 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header"); 6754 MEM_writeLE32(op, cBlockHeader24); 6755 op += ZSTD_blockHeaderSize; 6756 dstCapacity -= ZSTD_blockHeaderSize; 6757 cSize += ZSTD_blockHeaderSize; 6758 } 6759 6760 while (remaining) { 6761 size_t compressedSeqsSize; 6762 size_t cBlockSize; 6763 size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, 6764 cctx->blockSizeMax, remaining, 6765 inSeqs, inSeqsSize, seqPos); 6766 U32 const lastBlock = (blockSize == remaining); 6767 FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); 6768 assert(blockSize <= remaining); 6769 ZSTD_resetSeqStore(&cctx->seqStore); 6770 6771 blockSize = sequenceCopier(cctx, 6772 &seqPos, inSeqs, inSeqsSize, 6773 ip, blockSize, 6774 cctx->appliedParams.searchForExternalRepcodes); 6775 FORWARD_IF_ERROR(blockSize, "Bad sequence copy"); 6776 6777 /* If blocks are too small, emit as a nocompress block */ 6778 /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add an 6779 * additional 1. We need to revisit and change this logic to be more consistent */ 6780 if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { 6781 cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); 6782 FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); 6783 DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize); 6784 cSize += cBlockSize; 6785 ip += blockSize; 6786 op += cBlockSize; 6787 remaining -= blockSize; 6788 dstCapacity -= cBlockSize; 6789 continue; 6790 } 6791 6792 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); 6793 compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, 6794 &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, 6795 &cctx->appliedParams, 6796 op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, 6797 blockSize, 6798 cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, 6799 cctx->bmi2); 6800 FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); 6801 DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); 6802 6803 if (!cctx->isFirstBlock && 6804 ZSTD_maybeRLE(&cctx->seqStore) && 6805 ZSTD_isRLE(ip, blockSize)) { 6806 /* Note: don't emit the first block as RLE even if it qualifies because 6807 * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error 6808 * "should consume all input error."
6809 */ 6810 compressedSeqsSize = 1; 6811 } 6812 6813 if (compressedSeqsSize == 0) { 6814 /* ZSTD_noCompressBlock writes the block header as well */ 6815 cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); 6816 FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); 6817 DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize); 6818 } else if (compressedSeqsSize == 1) { 6819 cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); 6820 FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); 6821 DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize); 6822 } else { 6823 U32 cBlockHeader; 6824 /* Error checking and repcodes update */ 6825 ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); 6826 if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) 6827 cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; 6828 6829 /* Write block header into beginning of block*/ 6830 cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); 6831 MEM_writeLE24(op, cBlockHeader); 6832 cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; 6833 DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); 6834 } 6835 6836 cSize += cBlockSize; 6837 6838 if (lastBlock) { 6839 break; 6840 } else { 6841 ip += blockSize; 6842 op += cBlockSize; 6843 remaining -= blockSize; 6844 dstCapacity -= cBlockSize; 6845 cctx->isFirstBlock = 0; 6846 } 6847 DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); 6848 } 6849 6850 DEBUGLOG(4, "cSize final total: %zu", cSize); 6851 return cSize; 6852 } 6853 6854 size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, 6855 void* dst, size_t dstCapacity, 6856 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, 6857 const void* src, size_t srcSize) 6858 { 6859 BYTE* op = (BYTE*)dst; 6860 size_t cSize = 0; 6861 6862 /* Transparent initialization stage, same as compressStream2() */ 6863 DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity); 6864 assert(cctx != NULL); 6865 FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); 6866 6867 /* Begin writing output, starting with frame header */ 6868 { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, 6869 &cctx->appliedParams, srcSize, cctx->dictID); 6870 op += frameHeaderSize; 6871 assert(frameHeaderSize <= dstCapacity); 6872 dstCapacity -= frameHeaderSize; 6873 cSize += frameHeaderSize; 6874 } 6875 if (cctx->appliedParams.fParams.checksumFlag && srcSize) { 6876 xxh64_update(&cctx->xxhState, src, srcSize); 6877 } 6878 6879 /* Now generate compressed blocks */ 6880 { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx, 6881 op, dstCapacity, 6882 inSeqs, inSeqsSize, 6883 src, srcSize); 6884 FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); 6885 cSize += cBlocksSize; 6886 assert(cBlocksSize <= dstCapacity); 6887 dstCapacity -= cBlocksSize; 6888 } 6889 6890 /* Complete with frame checksum, if needed */ 6891 if (cctx->appliedParams.fParams.checksumFlag) { 6892 U32 const checksum = (U32) xxh64_digest(&cctx->xxhState); 6893 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); 6894 DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum); 6895 MEM_writeLE32((char*)dst + cSize, checksum); 6896 cSize += 4; 6897 } 6898 6899 DEBUGLOG(4, "Final compressed size: %zu", cSize); 6900 return cSize; 6901 } 
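/* Illustrative usage sketch for ZSTD_compressSequences()
 * (not part of the library : names are hypothetical, error handling elided).
 * The sequences must cover the whole source; with explicit delimiters,
 * each block ends with a {of=0, ml=0} entry holding its last literals :
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
 *     {   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
 *                                                     seqs, nbSeqs, src, srcSize);
 *         if (ZSTD_isError(cSize)) { handle the error }
 *     }
 *     ZSTD_freeCCtx(cctx);
 */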
6902 6903 6904 #if defined(__AVX2__) 6905 6906 #include <immintrin.h> /* AVX2 intrinsics */ 6907 6908 /* 6909 * Convert 2 sequences per iteration, using AVX2 intrinsics: 6910 * - offset -> offBase = offset + ZSTD_REP_NUM (== 3) 6911 * - litLength -> (U16) litLength 6912 * - matchLength -> (U16)(matchLength - 3) 6913 * - rep is ignored 6914 * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]). 6915 * 6916 * At the end, instead of extracting two __m128i, 6917 * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1, 6918 * then store the lower 16 bytes in one go. 6919 * 6920 * @returns 0 on success, with no long length detected 6921 * @returns > 0 if there is one long length (> 65535), 6922 * indicating the position and type. 6923 */ 6924 static size_t convertSequences_noRepcodes( 6925 SeqDef* dstSeqs, 6926 const ZSTD_Sequence* inSeqs, 6927 size_t nbSequences) 6928 { 6929 /* 6930 * addition: 6931 * For each 128-bit half: (offset+3, litLength+0, matchLength-3, rep+0) 6932 */ 6933 const __m256i addition = _mm256_setr_epi32( 6934 ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */ 6935 ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */ 6936 ); 6937 6938 /* limit: check if there is a long length */ 6939 const __m256i limit = _mm256_set1_epi32(65535); 6940 6941 /* 6942 * shuffle mask for byte-level rearrangement in each 128-bit half: 6943 * 6944 * Input layout (after addition) per 128-bit half: 6945 * [ offset+3 (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ] 6946 * We only need: 6947 * offBase (4 bytes) = offset+3 6948 * litLength (2 bytes) = low 2 bytes of litLength 6949 * mlBase (2 bytes) = low 2 bytes of (matchLength) 6950 * => Bytes [0..3, 4..5, 8..9], zero the rest. 6951 */ 6952 const __m256i mask = _mm256_setr_epi8( 6953 /* For the lower 128 bits => sequence i */ 6954 0, 1, 2, 3, /* offset+3 */ 6955 4, 5, /* litLength (16 bits) */ 6956 8, 9, /* matchLength (16 bits) */ 6957 (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, 6958 (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, 6959 6960 /* For the upper 128 bits => sequence i+1 */ 6961 16,17,18,19, /* offset+3 */ 6962 20,21, /* litLength */ 6963 24,25, /* matchLength */ 6964 (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, 6965 (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80 6966 ); 6967 6968 /* 6969 * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8). 6970 * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3]. 6971 * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1.
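 * e.g. starting from 64-bit lanes [L0, L1, L2, L3],
 * _mm256_permute4x64_epi64(v, 0xE8) yields [L0, L2, L2, L3].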
6973 #define PERM_LANE_0X_E8 0xE8   /* [0,2,2,3] in lane indices */
6974
6975     size_t longLen = 0, i = 0;
6976
6977     /* AVX permutation depends on the specific definition of target structures */
6978     ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
6979     ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0);
6980     ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4);
6981     ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
6982     ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8);
6983     ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0);
6984     ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4);
6985     ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6);
6986
6987     /* Process 2 sequences per loop iteration */
6988     for (; i + 1 < nbSequences; i += 2) {
6989         /* Load 2 ZSTD_Sequence (32 bytes) */
6990         __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]);
6991
6992         /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} = {3, 0, -3, 0} in each 128-bit half */
6993         __m256i vadd = _mm256_add_epi32(vin, addition);
6994
6995         /* Check for long length */
6996         __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit);  /* 0xFFFFFFFF for element > 65535 */
6997         int ll_res = _mm256_movemask_epi8(ll_cmp);
6998
6999         /* Shuffle bytes so each half gives us the 8 bytes we need */
7000         __m256i vshf = _mm256_shuffle_epi8(vadd, mask);
7001         /*
7002          * Now:
7003          * Lane0 = seq0's 8 bytes
7004          * Lane1 = 0
7005          * Lane2 = seq1's 8 bytes
7006          * Lane3 = 0
7007          */
7008
7009         /* Permute 64-bit lanes => move Lane2 down into Lane1. */
7010         __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8);
7011         /*
7012          * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1].
7013          * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them.
7014          */
7015
7016         /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */
7017         _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm));
7018         /*
7019          * This writes out 16 bytes total:
7020          * - offset 0..7  => seq0 (offBase, litLength, mlBase)
7021          * - offset 8..15 => seq1 (offBase, litLength, mlBase)
7022          */
7023
7024         /* check (unlikely) long lengths > 65535
7025          * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27]
7026          * => combined mask = 0x0FF00FF0
7027          */
7028         if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) {
7029             /* long length detected: let's figure out which one */
7030             if (inSeqs[i].matchLength > 65535+MINMATCH) {
7031                 assert(longLen == 0);
7032                 longLen = i + 1;
7033             }
7034             if (inSeqs[i].litLength > 65535) {
7035                 assert(longLen == 0);
7036                 longLen = i + nbSequences + 1;
7037             }
7038             if (inSeqs[i+1].matchLength > 65535+MINMATCH) {
7039                 assert(longLen == 0);
7040                 longLen = i + 1 + 1;
7041             }
7042             if (inSeqs[i+1].litLength > 65535) {
7043                 assert(longLen == 0);
7044                 longLen = i + 1 + nbSequences + 1;
7045             }
7046         }
7047     }
7048
7049     /* Handle leftover if @nbSequences is odd */
7050     if (i < nbSequences) {
7051         /* process last sequence */
7052         assert(i == nbSequences - 1);
7053         dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset);
7054         dstSeqs[i].litLength = (U16)inSeqs[i].litLength;
7055         dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH);
7056         /* check (unlikely) long lengths > 65535 */
7057         if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) {
7058             assert(longLen == 0);
7059             longLen = i + 1;
7060         }
7061         if (UNLIKELY(inSeqs[i].litLength > 65535)) {
7062             assert(longLen == 0);
7063             longLen = i + nbSequences + 1;
7064         }
7065     }
7066
7067     return longLen;
7068 }
7069
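/* Return convention, by example (illustrative):
 * with nbSequences == 10,
 *     a return of 0  means no litLength or matchLength exceeded 65535;
 *     a return of 4  flags the matchLength of sequence index 3 (3 + 1);
 *     a return of 14 flags the litLength of sequence index 3 (3 + 10 + 1).
 * The caller, ZSTD_convertBlockSequences(), decodes this value
 * to set seqStore.longLengthType and seqStore.longLengthPos.
 */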
7070 /* the vector implementation could also be ported to SSSE3,
7071  * but since this implementation is targeting modern systems (>= Sapphire Rapids),
7072  * it's not useful to develop and maintain code for older pre-AVX2 platforms */
7073
7074 #else /* no AVX2 */
7075
7076 static size_t convertSequences_noRepcodes(
7077     SeqDef* dstSeqs,
7078     const ZSTD_Sequence* inSeqs,
7079     size_t nbSequences)
7080 {
7081     size_t longLen = 0;
7082     size_t n;
7083     for (n=0; n<nbSequences; n++) {
7084         dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
7085         dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
7086         dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
7087         /* check for long length > 65535 */
7088         if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
7089             assert(longLen == 0);
7090             longLen = n + 1;
7091         }
7092         if (UNLIKELY(inSeqs[n].litLength > 65535)) {
7093             assert(longLen == 0);
7094             longLen = n + nbSequences + 1;
7095         }
7096     }
7097     return longLen;
7098 }
7099
7100 #endif
7101
7102 /*
7103  * Precondition: Sequences must end on an explicit Block Delimiter
7104  * @return: 0 on success, or an error code.
7105  * Note: Sequence validation functionality has been removed.
7106  * This keeps the main pipeline lean, improving performance.
7107  * It may be re-introduced later.
7108  */
7109 size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
7110             const ZSTD_Sequence* const inSeqs, size_t nbSequences,
7111             int repcodeResolution)
7112 {
7113     Repcodes_t updatedRepcodes;
7114     size_t seqNb = 0;
7115
7116     DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences);
7117
7118     RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
7119                     "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
7120
7121     ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
7122
7123     /* check end condition */
7124     assert(nbSequences >= 1);
7125     assert(inSeqs[nbSequences-1].matchLength == 0);
7126     assert(inSeqs[nbSequences-1].offset == 0);
7127
7128     /* Convert Sequences from public format to internal format */
7129     if (!repcodeResolution) {
7130         size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1);
7131         cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1;
7132         if (longl) {
7133             DEBUGLOG(5, "long length");
7134             assert(cctx->seqStore.longLengthType == ZSTD_llt_none);
7135             if (longl <= nbSequences-1) {
7136                 DEBUGLOG(5, "long match length detected at pos %zu", longl-1);
7137                 cctx->seqStore.longLengthType = ZSTD_llt_matchLength;
7138                 cctx->seqStore.longLengthPos = (U32)(longl-1);
7139             } else {
7140                 DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences);
7141                 assert(longl <= 2* (nbSequences-1));
7142                 cctx->seqStore.longLengthType = ZSTD_llt_literalLength;
7143                 cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1);
7144             }
7145         }
7146     } else {
7147         for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) {
7148             U32 const litLength = inSeqs[seqNb].litLength;
7149             U32 const matchLength = inSeqs[seqNb].matchLength;
7150             U32 const ll0 = (litLength == 0);
7151             U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0);
7152
7153             DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
7154             ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength);
7155             ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
7156         }
7157     }
7158
7159     /* If we skipped repcode search while parsing, we need to update repcodes now */
7160     if (!repcodeResolution && nbSequences > 1) {
7161         U32* const rep = updatedRepcodes.rep;
7162
7163         if (nbSequences >= 4) {
7164             U32 lastSeqIdx = (U32)nbSequences - 2;   /* index of last full sequence */
7165             rep[2] = inSeqs[lastSeqIdx - 2].offset;
7166             rep[1] = inSeqs[lastSeqIdx - 1].offset;
7167             rep[0] = inSeqs[lastSeqIdx].offset;
7168         } else if (nbSequences == 3) {
7169             rep[2] = rep[0];
7170             rep[1] = inSeqs[0].offset;
7171             rep[0] = inSeqs[1].offset;
7172         } else {
7173             assert(nbSequences == 2);
7174             rep[2] = rep[1];
7175             rep[1] = rep[0];
7176             rep[0] = inSeqs[0].offset;
7177         }
7178     }
7179
7180     ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
7181
7182     return 0;
7183 }
7184
7185 #if defined(ZSTD_ARCH_X86_AVX2)
7186
7187 BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
7188 {
7189     size_t i;
7190     __m256i const zeroVec = _mm256_setzero_si256();
7191     __m256i sumVec = zeroVec;        /* accumulates match+lit in 32-bit lanes */
7192     ZSTD_ALIGNED(32) U32 tmp[8];     /* temporary buffer for reduction */
7193     size_t mSum = 0, lSum = 0;
7194     ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
7195
7196     /* Process 2 structs (32 bytes) at a time */
7197     for (i = 0; i + 2 <= nbSeqs; i += 2) {
7198         /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */
7199         __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]);
7200         /* check end of block signal */
7201         __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec);
7202         int cmp_res = _mm256_movemask_epi8(cmp);
7203         /* indices for match lengths correspond to bits [8..11], [24..27]
7204          * => combined mask = 0x0F000F00 */
7205         ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
7206         if (cmp_res & 0x0F000F00) break;
7207         /* Accumulate in sumVec */
7208         sumVec = _mm256_add_epi32(sumVec, data);
7209     }
7210
7211     /* Horizontal reduction */
7212     _mm256_store_si256((__m256i*)tmp, sumVec);
7213     lSum = tmp[1] + tmp[5];
7214     mSum = tmp[2] + tmp[6];
7215
7216     /* Handle the leftover */
7217     for (; i < nbSeqs; i++) {
7218         lSum += seqs[i].litLength;
7219         mSum += seqs[i].matchLength;
7220         if (seqs[i].matchLength == 0) break;   /* end of block */
7221     }
7222
7223     if (i==nbSeqs) {
7224         /* reaching end of sequences: end of block signal was not present */
7225         BlockSummary bs;
7226         bs.nbSequences = ERROR(externalSequences_invalid);
7227         return bs;
7228     }
7229     {   BlockSummary bs;
7230         bs.nbSequences = i+1;
7231         bs.blockSize = lSum + mSum;
7232         bs.litSize = lSum;
7233         return bs;
7234     }
7235 }
7236
7237 #else
7238
7239 BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
7240 {
7241     size_t totalMatchSize = 0;
7242     size_t litSize = 0;
7243     size_t n;
7244     assert(seqs);
7245     for (n=0; n<nbSeqs; n++) {
7246         totalMatchSize += seqs[n].matchLength;
7247         litSize += seqs[n].litLength;
7248         if (seqs[n].matchLength == 0) {
7249             assert(seqs[n].offset == 0);
7250             break;
7251         }
7252     }
7253     if (n==nbSeqs) {
7254         BlockSummary bs;
7255         bs.nbSequences = ERROR(externalSequences_invalid);
7256         return bs;
7257     }
7258     {   BlockSummary bs;
7259         bs.nbSequences = n+1;
7260         bs.blockSize = litSize + totalMatchSize;
7261         bs.litSize = litSize;
7262         return bs;
7263     }
7264 }
7265 #endif
7266
7267
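/* Example (illustrative): given the input
 *     { {offset=5, litLength=10, matchLength=20}, {offset=0, litLength=4, matchLength=0}, ... }
 * the end-of-block signal (matchLength == 0) is found at index 1, so
 *     bs.nbSequences == 2   (the block delimiter is counted)
 *     bs.litSize     == 14  (10 + 4 : includes the delimiter's last literals)
 *     bs.blockSize   == 34  (14 literals + 20 matched bytes regenerated by this block)
 * If no end-of-block signal is present, bs.nbSequences is set to an error code
 * (externalSequences_invalid), which callers must test with ZSTD_isError().
 */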
7268 static size_t
7269 ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx,
7270                 void* dst, size_t dstCapacity,
7271                 const ZSTD_Sequence* inSeqs, size_t nbSequences,
7272                 const void* literals, size_t litSize, size_t srcSize)
7273 {
7274     size_t remaining = srcSize;
7275     size_t cSize = 0;
7276     BYTE* op = (BYTE*)dst;
7277     int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable);
7278     assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto);
7279
7280     DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize);
7281     RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block");
7282
7283     /* Special case: empty frame */
7284     if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) {
7285         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
7286         RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header");
7287         MEM_writeLE24(op, cBlockHeader24);
7288         op += ZSTD_blockHeaderSize;
7289         dstCapacity -= ZSTD_blockHeaderSize;
7290         cSize += ZSTD_blockHeaderSize;
7291     }
7292
7293     while (nbSequences) {
7294         size_t compressedSeqsSize, cBlockSize, conversionStatus;
7295         BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences);
7296         U32 const lastBlock = (block.nbSequences == nbSequences);
7297         FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block");
7298         assert(block.nbSequences <= nbSequences);
7299         RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer");
7300         ZSTD_resetSeqStore(&cctx->seqStore);
7301
7302         conversionStatus = ZSTD_convertBlockSequences(cctx,
7303                             inSeqs, block.nbSequences,
7304                             repcodeResolution);
7305         FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion");
7306         inSeqs += block.nbSequences;
7307         nbSequences -= block.nbSequences;
7308         remaining -= block.blockSize;
7309
7310         /* Note: when blockSize is very small, the other variants send it uncompressed.
7311          * Here, we still send the sequences, because we don't have the original source to send it uncompressed.
7312          * One could imagine in theory reproducing the source from the sequences,
7313          * but that's complex and memory intensive, and goes against the objectives of this variant. */
7314
7315         RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
7316
7317         compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(
7318                                 op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
7319                                 literals, block.litSize,
7320                                 &cctx->seqStore,
7321                                 &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
7322                                 &cctx->appliedParams,
7323                                 cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
7324                                 cctx->bmi2);
7325         FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
7326         /* note: the spec forbids any compressed block to be larger than maximum block size */
7327         if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0;
7328         DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
7329         litSize -= block.litSize;
7330         literals = (const char*)literals + block.litSize;
7331
7332         /* Note: difficult to check the source for an RLE block when only Literals are provided,
7333          * but it could be detected by analyzing the sequences directly */
7334
7335         if (compressedSeqsSize == 0) {
7336             /* Sending uncompressed blocks is out of reach, because the source is not provided.
7337              * In theory, one could use the sequences to regenerate the source, like a decompressor,
7338              * but it's complex and memory hungry, defeating the purpose of this variant.
7339              * Current outcome: generate an error code.
7340 */ 7341 RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block"); 7342 } else { 7343 U32 cBlockHeader; 7344 assert(compressedSeqsSize > 1); /* no RLE */ 7345 /* Error checking and repcodes update */ 7346 ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); 7347 if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) 7348 cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; 7349 7350 /* Write block header into beginning of block*/ 7351 cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); 7352 MEM_writeLE24(op, cBlockHeader); 7353 cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; 7354 DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); 7355 } 7356 7357 cSize += cBlockSize; 7358 op += cBlockSize; 7359 dstCapacity -= cBlockSize; 7360 cctx->isFirstBlock = 0; 7361 DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); 7362 7363 if (lastBlock) { 7364 assert(nbSequences == 0); 7365 break; 7366 } 7367 } 7368 7369 RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed"); 7370 RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize); 7371 DEBUGLOG(4, "cSize final total: %zu", cSize); 7372 return cSize; 7373 } 7374 7375 size_t 7376 ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, 7377 void* dst, size_t dstCapacity, 7378 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, 7379 const void* literals, size_t litSize, size_t litCapacity, 7380 size_t decompressedSize) 7381 { 7382 BYTE* op = (BYTE*)dst; 7383 size_t cSize = 0; 7384 7385 /* Transparent initialization stage, same as compressStream2() */ 7386 DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity); 7387 assert(cctx != NULL); 7388 if (litCapacity < litSize) { 7389 RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)"); 7390 } 7391 FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed"); 7392 7393 if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) { 7394 RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters"); 7395 } 7396 if (cctx->appliedParams.validateSequences) { 7397 RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation"); 7398 } 7399 if (cctx->appliedParams.fParams.checksumFlag) { 7400 RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum"); 7401 } 7402 7403 /* Begin writing output, starting with frame header */ 7404 { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, 7405 &cctx->appliedParams, decompressedSize, cctx->dictID); 7406 op += frameHeaderSize; 7407 assert(frameHeaderSize <= dstCapacity); 7408 dstCapacity -= frameHeaderSize; 7409 cSize += frameHeaderSize; 7410 } 7411 7412 /* Now generate compressed blocks */ 7413 { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, 7414 op, dstCapacity, 7415 inSeqs, inSeqsSize, 7416 literals, litSize, decompressedSize); 7417 FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); 7418 cSize += cBlocksSize; 7419 assert(cBlocksSize <= dstCapacity); 7420 dstCapacity -= cBlocksSize; 7421 } 7422 7423 DEBUGLOG(4, 
"Final compressed size: %zu", cSize); 7424 return cSize; 7425 } 7426 7427 /*====== Finalize ======*/ 7428 7429 static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) 7430 { 7431 const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; 7432 const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); 7433 return stableInput ? zcs->expectedInBuffer : nullInput; 7434 } 7435 7436 /*! ZSTD_flushStream() : 7437 * @return : amount of data remaining to flush */ 7438 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) 7439 { 7440 ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); 7441 input.size = input.pos; /* do not ingest more input during flush */ 7442 return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); 7443 } 7444 7445 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) 7446 { 7447 ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); 7448 size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); 7449 FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed"); 7450 if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ 7451 /* single thread mode : attempt to calculate remaining to flush more precisely */ 7452 { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; 7453 size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4); 7454 size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; 7455 DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); 7456 return toFlush; 7457 } 7458 } 7459 7460 7461 /*-===== Pre-defined compression levels =====-*/ 7462 #include "clevels.h" 7463 7464 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } 7465 int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } 7466 int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; } 7467 7468 static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) 7469 { 7470 ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); 7471 switch (cParams.strategy) { 7472 case ZSTD_fast: 7473 case ZSTD_dfast: 7474 break; 7475 case ZSTD_greedy: 7476 case ZSTD_lazy: 7477 case ZSTD_lazy2: 7478 cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG; 7479 break; 7480 case ZSTD_btlazy2: 7481 case ZSTD_btopt: 7482 case ZSTD_btultra: 7483 case ZSTD_btultra2: 7484 break; 7485 } 7486 return cParams; 7487 } 7488 7489 static int ZSTD_dedicatedDictSearch_isSupported( 7490 ZSTD_compressionParameters const* cParams) 7491 { 7492 return (cParams->strategy >= ZSTD_greedy) 7493 && (cParams->strategy <= ZSTD_lazy2) 7494 && (cParams->hashLog > cParams->chainLog) 7495 && (cParams->chainLog <= 24); 7496 } 7497 7498 /* 7499 * Reverses the adjustment applied to cparams when enabling dedicated dict 7500 * search. This is used to recover the params set to be used in the working 7501 * context. (Otherwise, those tables would also grow.) 
7502  */
7503 static void ZSTD_dedicatedDictSearch_revertCParams(
7504     ZSTD_compressionParameters* cParams) {
7505     switch (cParams->strategy) {
7506     case ZSTD_fast:
7507     case ZSTD_dfast:
7508         break;
7509     case ZSTD_greedy:
7510     case ZSTD_lazy:
7511     case ZSTD_lazy2:
7512         cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
7513         if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
7514             cParams->hashLog = ZSTD_HASHLOG_MIN;
7515         }
7516         break;
7517     case ZSTD_btlazy2:
7518     case ZSTD_btopt:
7519     case ZSTD_btultra:
7520     case ZSTD_btultra2:
7521         break;
7522     }
7523 }
7524
7525 static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
7526 {
7527     switch (mode) {
7528     case ZSTD_cpm_unknown:
7529     case ZSTD_cpm_noAttachDict:
7530     case ZSTD_cpm_createCDict:
7531         break;
7532     case ZSTD_cpm_attachDict:
7533         dictSize = 0;
7534         break;
7535     default:
7536         assert(0);
7537         break;
7538     }
7539     {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
7540         size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
7541         return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
7542     }
7543 }
7544
7545 /*! ZSTD_getCParams_internal() :
7546  * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
7547  *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
7548  *        Use dictSize == 0 for unknown or unused.
7549  *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */
7550 static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
7551 {
7552     U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
7553     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
7554     int row;
7555     DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
7556
7557     /* row */
7558     if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
7559     else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
7560     else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
7561     else row = compressionLevel;
7562
7563     {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
7564         DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
7565         /* acceleration factor */
7566         if (compressionLevel < 0) {
7567             int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
7568             cp.targetLength = (unsigned)(-clampedCompressionLevel);
7569         }
7570         /* refine parameters based on srcSize & dictSize */
7571         return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto);
7572     }
7573 }
7574
7575 /*! ZSTD_getCParams() :
7576  * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
7577  *  Size values are optional, provide 0 if not known or unused */
7578 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
7579 {
7580     if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
7581     return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
7582 }
7583
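/* Usage sketch (illustrative): querying parameters for a given level.
 * `fileSize` is a hypothetical caller-side value; ZSTD_getCParams() maps 0 to "unknown".
 *
 *     ZSTD_compressionParameters const cp = ZSTD_getCParams(19, fileSize, 0);
 *
 * Individual fields (cp.windowLog, cp.hashLog, cp.strategy, ...) may then be
 * inspected, or applied to a context, e.g.
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cp.windowLog);
 */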
7584 /*! ZSTD_getParams_internal() :
7585  * same idea as ZSTD_getCParams()
7586  * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
7587  *  Fields of `ZSTD_frameParameters` are set to default values */
7588 static ZSTD_parameters
7589 ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
7590 {
7591     ZSTD_parameters params;
7592     ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
7593     DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
7594     ZSTD_memset(&params, 0, sizeof(params));
7595     params.cParams = cParams;
7596     params.fParams.contentSizeFlag = 1;
7597     return params;
7598 }
7599
7600 /*! ZSTD_getParams() :
7601  * same idea as ZSTD_getCParams()
7602  * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
7603  *  Fields of `ZSTD_frameParameters` are set to default values */
7604 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
7605 {
7606     if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
7607     return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
7608 }
7609
7610 void ZSTD_registerSequenceProducer(
7611     ZSTD_CCtx* zc,
7612     void* extSeqProdState,
7613     ZSTD_sequenceProducer_F extSeqProdFunc)
7614 {
7615     assert(zc != NULL);
7616     ZSTD_CCtxParams_registerSequenceProducer(
7617         &zc->requestedParams, extSeqProdState, extSeqProdFunc
7618     );
7619 }
7620
7621 void ZSTD_CCtxParams_registerSequenceProducer(
7622     ZSTD_CCtx_params* params,
7623     void* extSeqProdState,
7624     ZSTD_sequenceProducer_F extSeqProdFunc)
7625 {
7626     assert(params != NULL);
7627     if (extSeqProdFunc != NULL) {
7628         params->extSeqProdFunc = extSeqProdFunc;
7629         params->extSeqProdState = extSeqProdState;
7630     } else {
7631         params->extSeqProdFunc = NULL;
7632         params->extSeqProdState = NULL;
7633     }
7634 }
7635
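/* Usage sketch (illustrative): registering an external sequence producer.
 * The callback shape follows the ZSTD_sequenceProducer_F typedef from zstd.h
 * (experimental API); `myState` and `myProducer` are hypothetical.
 *
 *     static size_t myProducer(void* state,
 *                     ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
 *                     const void* src, size_t srcSize,
 *                     const void* dict, size_t dictSize,
 *                     int compressionLevel, size_t windowSize)
 *     {
 *         // fill outSeqs[] from src, then return the number of sequences produced,
 *         // or ZSTD_SEQUENCE_PRODUCER_ERROR on failure
 *     }
 *
 *     ZSTD_registerSequenceProducer(cctx, &myState, myProducer);   // install
 *     ZSTD_registerSequenceProducer(cctx, NULL, NULL);             // uninstall
 */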