// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/* ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
 *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if unable to compress.
 *            Or error code */
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
                              const BYTE* literals, size_t litSize,
                              void* dst, size_t dstSize,
                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
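    /* When the Huffman table has already been transmitted by a previous sub-block
     * (writeEntropy==0), literals are flagged set_repeat and encoded as a
     * Treeless_Literals_Block reusing that table. */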
    SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
        const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
                                          : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return (size_t)(op-ostart);
}

/* ZSTD_seqDecompressedSize() :
 * @return : size, in bytes, that the provided @sequences (plus @litSize literals)
 *           will regenerate once decompressed. */
static size_t
ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
                         const SeqDef* sequences, size_t nbSeqs,
                         size_t litSize, int lastSubBlock)
{
    size_t matchLengthSum = 0;
    size_t litLengthSum = 0;
    size_t n;
    for (n = 0; n < nbSeqs; n++) {
        const ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
        litLengthSum += seqLen.litLength;
        matchLengthSum += seqLen.matchLength;
    }
    DEBUGLOG(5, "ZSTD_seqDecompressedSize: %u sequences from %p: %u literals + %u matchlength",
                (unsigned)nbSeqs, (const void*)sequences,
                (unsigned)litLengthSum, (unsigned)matchLengthSum);
    if (!lastSubBlock)
        assert(litLengthSum == litSize);
    else
        assert(litLengthSum <= litSize);
    (void)litLengthSum;
    return matchLengthSum + litSize;
}
/* ZSTD_compressSubBlock_sequences() :
 *  Compresses sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                const SeqDef* sequences, size_t nbSeq,
                                const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                const int bmi2, int writeEntropy, int* entropyWritten)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 128)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return (size_t)(op - ostart);
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }
    /* zstd versions <= 1.4.0 mistakenly report error when
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode where sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return (size_t)(op - ostart);
}

/* ZSTD_compressSubBlock() :
 *  Compresses a single sub-block.
 *  @return : compressed size of the sub-block
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                    const SeqDef* sequences, size_t nbSeq,
                                    const BYTE* literals, size_t litSize,
                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                    const ZSTD_CCtx_params* cctxParams,
                                    void* dst, size_t dstCapacity,
                                    const int bmi2,
                                    int writeLitEntropy, int writeSeqEntropy,
                                    int* litEntropyWritten, int* seqEntropyWritten,
                                    U32 lastBlock)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart + ZSTD_blockHeaderSize;
    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
                                                        &entropyMetadata->hufMetadata, literals, litSize,
                                                        op, (size_t)(oend-op),
                                                        bmi2, writeLitEntropy, litEntropyWritten);
        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
        if (cLitSize == 0) return 0;
        op += cLitSize;
    }
    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
                                                          &entropyMetadata->fseMetadata,
                                                          sequences, nbSeq,
                                                          llCode, mlCode, ofCode,
                                                          cctxParams,
                                                          op, (size_t)(oend-op),
                                                          bmi2, writeSeqEntropy, seqEntropyWritten);
        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
        if (cSeqSize == 0) return 0;
        op += cSeqSize;
    }
    /* Write block header */
    {   size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize;
        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
        MEM_writeLE24(ostart, cBlockHeader24);
    }
    return (size_t)(op-ostart);
}
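/* ZSTD_estimateSubBlockSize_literal() :
 * Estimates the compressed size of a sub-block's literals section,
 * by building a histogram of @literals and pricing it with the existing Huffman table.
 * Falls back to @litSize (raw cost) for set_basic or on histogram error, and to 1 byte for set_rle.
 * Adds the Huffman table description size when @writeEntropy is set. */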
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
                                                const ZSTD_hufCTables_t* huf,
                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                void* workspace, size_t wkspSize,
                                                int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = 255;
    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp(countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}

static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type,
                                                   const BYTE* codeTable, unsigned maxCode,
                                                   size_t nbSeq, const FSE_CTable* fseCTable,
                                                   const U8* additionalBits,
                                                   short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                                                   void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        cSymbolTypeSizeEstimateInBits = max <= defaultMax
                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
                : ERROR(GENERIC);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits / 8;
}

static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
                                                  const BYTE* llCodeTable,
                                                  const BYTE* mlCodeTable,
                                                  size_t nbSeq,
                                                  const ZSTD_fseCTables_t* fseTables,
                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                  void* workspace, size_t wkspSize,
                                                  int writeEntropy)
{
    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
    size_t cSeqSizeEstimate = 0;
    if (nbSeq == 0) return sequencesSectionHeaderSize;
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
                                                             nbSeq, fseTables->offcodeCTable, NULL,
                                                             OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                             workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
                                                             nbSeq, fseTables->litlengthCTable, LL_bits,
                                                             LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                             workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
                                                             nbSeq, fseTables->matchlengthCTable, ML_bits,
                                                             ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                             workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

typedef struct {
    size_t estLitSize;
    size_t estBlockSize;
} EstimatedBlockSize;
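/* ZSTD_estimateSubBlockSize() :
 * Combines the literals and sequences estimates into an estimate for a complete sub-block,
 * including its block header.
 * @return : both the literals-only estimate and the total estimated block size. */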
static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
                                                    const BYTE* ofCodeTable,
                                                    const BYTE* llCodeTable,
                                                    const BYTE* mlCodeTable,
                                                    size_t nbSeq,
                                                    const ZSTD_entropyCTables_t* entropy,
                                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                                    void* workspace, size_t wkspSize,
                                                    int writeLitEntropy, int writeSeqEntropy)
{
    EstimatedBlockSize ebs;
    ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
                                                       &entropy->huf, &entropyMetadata->hufMetadata,
                                                       workspace, wkspSize, writeLitEntropy);
    ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                           nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                           workspace, wkspSize, writeSeqEntropy);
    ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
    return ebs;
}

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount)
{
    size_t n, total = 0;
    assert(sp != NULL);
    for (n = 0; n < seqCount; n++) {
        total += ZSTD_getSequenceLength(seqStore, sp+n).litLength;
    }
    DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total);
    return total;
}

#define BYTESCALE 256
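/* sizeBlockSequences() :
 * Determines how many sequences, starting from @sp, to include in the next sub-block,
 * so that its estimated cost approaches @targetBudget.
 * @targetBudget, @avgLitCost and @avgSeqCost are fixed-point values, scaled by BYTESCALE.
 * The budget may be exceeded as long as the sub-block is still estimated incompressible.
 * @return : nb of sequences selected (>= 1). */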
static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs,
                                 size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
                                 int firstSubBlock)
{
    size_t n, budget = 0, inSize = 0;
    /* entropy headers */
    size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
    assert(firstSubBlock == 0 || firstSubBlock == 1);
    budget += headerSize;

    /* first sequence => at least one sequence */
    budget += sp[0].litLength * avgLitCost + avgSeqCost;
    if (budget > targetBudget) return 1;
    inSize = sp[0].litLength + (sp[0].mlBase + MINMATCH);

    /* loop over sequences */
    for (n = 1; n < nbSeqs; n++) {
        size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
        budget += currentCost;
        inSize += sp[n].litLength + (sp[n].mlBase + MINMATCH);
        /* stop when sub-block budget is reached */
        if ( (budget > targetBudget)
            /* though continue to expand until the sub-block is deemed compressible */
          && (budget < inSize * BYTESCALE) )
            break;
    }

    return n;
}

/* ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written into the first block.
 *  The following blocks use repeat_mode to compress.
 *  Sub-blocks are all compressed, except the last one when beneficial.
 *  @return : compressed size of the super block (which features multiple ZSTD blocks)
 *            or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr,
                                          const ZSTD_compressedBlockState_t* prevCBlock,
                                          ZSTD_compressedBlockState_t* nextCBlock,
                                          const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                          const ZSTD_CCtx_params* cctxParams,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize,
                                          const int bmi2, U32 lastBlock,
                                          void* workspace, size_t wkspSize)
{
    const SeqDef* const sstart = seqStorePtr->sequencesStart;
    const SeqDef* const send = seqStorePtr->sequences;
    const SeqDef* sp = sstart; /* tracks progress within seqStorePtr->sequences */
    size_t const nbSeqs = (size_t)(send - sstart);
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    size_t const nbLiterals = (size_t)(lend - lstart);
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
    size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
    int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
    int writeSeqEntropy = 1;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
               (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));

    /* let's start by a general estimation for the full block */
    if (nbSeqs > 0) {
        EstimatedBlockSize const ebs =
                ZSTD_estimateSubBlockSize(lp, nbLiterals,
                                          ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
                                          &nextCBlock->entropy, entropyMetadata,
                                          workspace, wkspSize,
                                          writeLitEntropy, writeSeqEntropy);
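        /* Note : avgLitCost, avgSeqCost and avgBlockBudget below are fixed-point values,
         * scaled by BYTESCALE (i.e. expressed in 1/256 of a byte). */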
        /* quick estimation */
        size_t const avgLitCost = nbLiterals ? (ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE;
        size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs;
        const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1);
        size_t n, avgBlockBudget, blockBudgetSupp = 0;
        avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks;
        DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes",
                    (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE,
                    (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE);
        /* simplification: if the estimate says that the full superblock doesn't compress, just bail out immediately
         * this will result in the production of a single uncompressed block covering @srcSize. */
        if (ebs.estBlockSize > srcSize) return 0;

        /* compress and write sub-blocks */
        assert(nbSubBlocks > 0);
        for (n = 0; n < nbSubBlocks-1; n++) {
            /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
            size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp),
                                        avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0);
            /* if reached last sequence : break to last sub-block (simplification) */
            assert(seqCount <= (size_t)(send-sp));
            if (sp + seqCount == send) break;
            assert(seqCount > 0);
            /* compress sub-block */
            {   int litEntropyWritten = 0;
                int seqEntropyWritten = 0;
                size_t litSize = countLiterals(seqStorePtr, sp, seqCount);
                const size_t decompressedSize =
                        ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
                size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                sp, seqCount,
                                                lp, litSize,
                                                llCodePtr, mlCodePtr, ofCodePtr,
                                                cctxParams,
                                                op, (size_t)(oend-op),
                                                bmi2, writeLitEntropy, writeSeqEntropy,
                                                &litEntropyWritten, &seqEntropyWritten,
                                                0);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");

                /* check compressibility, update state components */
                if (cSize > 0 && cSize < decompressedSize) {
                    DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes",
                                (unsigned)decompressedSize, (unsigned)cSize);
                    assert(ip + decompressedSize <= iend);
                    ip += decompressedSize;
                    lp += litSize;
                    op += cSize;
                    llCodePtr += seqCount;
                    mlCodePtr += seqCount;
                    ofCodePtr += seqCount;
                    /* Entropy only needs to be written once */
                    if (litEntropyWritten) {
                        writeLitEntropy = 0;
                    }
                    if (seqEntropyWritten) {
                        writeSeqEntropy = 0;
                    }
                    sp += seqCount;
                    blockBudgetSupp = 0;
            }   }
            /* otherwise : do not compress yet, coalesce current sub-block with following one */
        }
    } /* if (nbSeqs > 0) */
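    /* whatever remains at this point (sequences and trailing literals)
     * is gathered into one final sub-block below */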
    /* write last block */
    DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp));
    {   int litEntropyWritten = 0;
        int seqEntropyWritten = 0;
        size_t litSize = (size_t)(lend - lp);
        size_t seqCount = (size_t)(send - sp);
        const size_t decompressedSize =
                ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
        size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                            sp, seqCount,
                                            lp, litSize,
                                            llCodePtr, mlCodePtr, ofCodePtr,
                                            cctxParams,
                                            op, (size_t)(oend-op),
                                            bmi2, writeLitEntropy, writeSeqEntropy,
                                            &litEntropyWritten, &seqEntropyWritten,
                                            lastBlock);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");

        /* update pointers, the nb of literals borrowed from next sequence must be preserved */
        if (cSize > 0 && cSize < decompressedSize) {
            DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes",
                        (unsigned)decompressedSize, (unsigned)cSize);
            assert(ip + decompressedSize <= iend);
            ip += decompressedSize;
            lp += litSize;
            op += cSize;
            llCodePtr += seqCount;
            mlCodePtr += seqCount;
            ofCodePtr += seqCount;
            /* Entropy only needs to be written once */
            if (litEntropyWritten) {
                writeLitEntropy = 0;
            }
            if (seqEntropyWritten) {
                writeSeqEntropy = 0;
            }
            sp += seqCount;
        }
    }

    if (writeLitEntropy) {
        DEBUGLOG(5, "Literal entropy tables were never written");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block");
        return 0;
    }

    if (ip < iend) {
        /* some data left : last part of the block sent uncompressed */
        size_t const rSize = (size_t)((iend - ip));
        size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock);
        DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            const SeqDef* seq;
            Repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u",
               (unsigned)(op-ostart));
    return (size_t)(op-ostart);
}

/* ZSTD_compressSuperBlock() :
 * Entry point : builds entropy statistics for the whole block,
 * then splits it into multiple sub-blocks (see ZSTD_compressSubBlock_multi()),
 * targeting a compressed size of roughly zc->appliedParams.targetCBlockSize per sub-block. */
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               unsigned lastBlock)
{
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */);
}