zstd-ruby 1.3.7.0 → 1.3.8.0

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/zstdruby/libzstd/BUCK +15 -2
  4. data/ext/zstdruby/libzstd/Makefile +37 -2
  5. data/ext/zstdruby/libzstd/README.md +67 -41
  6. data/ext/zstdruby/libzstd/common/bitstream.h +2 -2
  7. data/ext/zstdruby/libzstd/common/compiler.h +19 -12
  8. data/ext/zstdruby/libzstd/common/cpu.h +1 -1
  9. data/ext/zstdruby/libzstd/common/debug.h +22 -11
  10. data/ext/zstdruby/libzstd/common/error_private.c +6 -0
  11. data/ext/zstdruby/libzstd/common/fse.h +2 -2
  12. data/ext/zstdruby/libzstd/common/huf.h +25 -1
  13. data/ext/zstdruby/libzstd/common/pool.c +1 -1
  14. data/ext/zstdruby/libzstd/common/zstd_common.c +3 -1
  15. data/ext/zstdruby/libzstd/common/zstd_errors.h +1 -0
  16. data/ext/zstdruby/libzstd/common/zstd_internal.h +11 -2
  17. data/ext/zstdruby/libzstd/compress/fse_compress.c +3 -3
  18. data/ext/zstdruby/libzstd/compress/hist.c +19 -11
  19. data/ext/zstdruby/libzstd/compress/hist.h +11 -8
  20. data/ext/zstdruby/libzstd/compress/huf_compress.c +33 -31
  21. data/ext/zstdruby/libzstd/compress/zstd_compress.c +621 -371
  22. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +90 -28
  23. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +4 -4
  24. data/ext/zstdruby/libzstd/compress/zstd_fast.c +15 -15
  25. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +25 -18
  26. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +18 -67
  27. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +2 -6
  28. data/ext/zstdruby/libzstd/compress/zstd_opt.c +133 -48
  29. data/ext/zstdruby/libzstd/compress/zstd_opt.h +8 -0
  30. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +229 -73
  31. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +18 -10
  32. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +178 -42
  33. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +240 -0
  34. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +44 -0
  35. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +244 -1680
  36. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1307 -0
  37. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +59 -0
  38. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +168 -0
  39. data/ext/zstdruby/libzstd/dictBuilder/cover.c +13 -11
  40. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +15 -15
  41. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +28 -28
  42. data/ext/zstdruby/libzstd/dll/libzstd.def +0 -1
  43. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -10
  44. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +15 -15
  45. data/ext/zstdruby/libzstd/zstd.h +1208 -968
  46. data/lib/zstd-ruby/version.rb +1 -1
  47. metadata +7 -2
data/ext/zstdruby/libzstd/decompress/zstd_ddict.h (new file)
@@ -0,0 +1,44 @@
+ /*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+ #ifndef ZSTD_DDICT_H
+ #define ZSTD_DDICT_H
+
+ /*-*******************************************************
+ * Dependencies
+ *********************************************************/
+ #include <stddef.h> /* size_t */
+ #include "zstd.h" /* ZSTD_DDict, and several public functions */
+
+
+ /*-*******************************************************
+ * Interface
+ *********************************************************/
+
+ /* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+ const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+ size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+ void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+ #endif /* ZSTD_DDICT_H */
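The new zstd_ddict.h only publishes internal accessors (ZSTD_DDict_dictContent(), ZSTD_DDict_dictSize(), ZSTD_copyDDictParameters()) so the decompression units split out in this release can share one DDict definition; application code keeps going through the public DDict API declared in zstd.h. For orientation, a minimal sketch of that public path, assuming the stable zstd.h entry points (the wrapper function name is illustrative, not part of the library):

    #include <assert.h>
    #include <zstd.h>   /* ZSTD_createDDict, ZSTD_decompress_usingDDict, ... */

    /* Decompress one frame with a digested dictionary.
     * Error handling is reduced to asserts to keep the sketch short. */
    static size_t decompress_with_ddict(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        const void* dict, size_t dictSize)
    {
        ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);  /* digest once, reuse often */
        ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
        size_t dSize;
        assert(ddict != NULL && dctx != NULL);
        dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
        assert(!ZSTD_isError(dSize));
        ZSTD_freeDCtx(dctx);
        ZSTD_freeDDict(ddict);
        return dSize;
    }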
data/ext/zstdruby/libzstd/decompress/zstd_decompress.c
@@ -37,12 +37,12 @@
  * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
  */
  #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
- # define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
+ # define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
  #endif

  /*!
  * NO_FORWARD_PROGRESS_MAX :
- * maximum allowed nb of calls to ZSTD_decompressStream() and ZSTD_decompress_generic()
+ * maximum allowed nb of calls to ZSTD_decompressStream()
  * without any forward progress
  * (defined as: no byte read from input, and no byte flushed to output)
  * before triggering an error.
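NO_FORWARD_PROGRESS_MAX now counts only ZSTD_decompressStream() calls, the entry point a streaming consumer loops over. A hedged sketch of such a loop using the public streaming API from zstd.h (buffer sizes and the function name are illustrative); the guard trips if many consecutive calls neither consume input nor produce output:

    #include <stdio.h>
    #include <zstd.h>

    /* Decompress fin to fout with the streaming API. Returns 0 on success. */
    static int stream_decompress(FILE* fin, FILE* fout)
    {
        ZSTD_DStream* const zds = ZSTD_createDStream();
        static char inBuf[1 << 17], outBuf[1 << 17];
        size_t readSize;
        if (zds == NULL || ZSTD_isError(ZSTD_initDStream(zds))) return -1;

        while ((readSize = fread(inBuf, 1, sizeof(inBuf), fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
                /* each call should read some input and/or flush some output;
                 * a long run of calls with no progress now returns an error */
                size_t const ret = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(ret)) { ZSTD_freeDStream(zds); return -1; }
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        ZSTD_freeDStream(zds);
        return 0;
    }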
@@ -56,128 +56,25 @@
  * Dependencies
  *********************************************************/
  #include <string.h> /* memcpy, memmove, memset */
- #include "compiler.h" /* prefetch */
  #include "cpu.h" /* bmi2 */
  #include "mem.h" /* low level memory routines */
  #define FSE_STATIC_LINKING_ONLY
  #include "fse.h"
  #define HUF_STATIC_LINKING_ONLY
  #include "huf.h"
- #include "zstd_internal.h"
+ #include "zstd_internal.h" /* blockProperties_t */
+ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */

  #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
  # include "zstd_legacy.h"
  #endif

- static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
- static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
-
-
- /*-*************************************
- * Errors
- ***************************************/
- #define ZSTD_isError ERR_isError /* for inlining */
- #define FSE_isError ERR_isError
- #define HUF_isError ERR_isError
-
-
- /*_*******************************************************
- * Memory operations
- **********************************************************/
- static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-

  /*-*************************************************************
  * Context management
  ***************************************************************/
- typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
- ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
- ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
- ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
-
- typedef enum { zdss_init=0, zdss_loadHeader,
- zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
-
-
- typedef struct {
- U32 fastMode;
- U32 tableLog;
- } ZSTD_seqSymbol_header;
-
- typedef struct {
- U16 nextState;
- BYTE nbAdditionalBits;
- BYTE nbBits;
- U32 baseValue;
- } ZSTD_seqSymbol;
-
- #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
-
- typedef struct {
- ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
- ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
- ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
- HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
- U32 rep[ZSTD_REP_NUM];
- } ZSTD_entropyDTables_t;
-
- struct ZSTD_DCtx_s
- {
- const ZSTD_seqSymbol* LLTptr;
- const ZSTD_seqSymbol* MLTptr;
- const ZSTD_seqSymbol* OFTptr;
- const HUF_DTable* HUFptr;
- ZSTD_entropyDTables_t entropy;
- U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
- const void* previousDstEnd; /* detect continuity */
- const void* prefixStart; /* start of current segment */
- const void* virtualStart; /* virtual start of previous segment if it was just before current one */
- const void* dictEnd; /* end of previous segment */
- size_t expected;
- ZSTD_frameHeader fParams;
- U64 decodedSize;
- blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
- ZSTD_dStage stage;
- U32 litEntropy;
- U32 fseEntropy;
- XXH64_state_t xxhState;
- size_t headerSize;
- ZSTD_format_e format;
- const BYTE* litPtr;
- ZSTD_customMem customMem;
- size_t litSize;
- size_t rleSize;
- size_t staticSize;
- int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
-
- /* dictionary */
- ZSTD_DDict* ddictLocal;
- const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
- U32 dictID;
- int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
-
- /* streaming */
- ZSTD_dStreamStage streamStage;
- char* inBuff;
- size_t inBuffSize;
- size_t inPos;
- size_t maxWindowSize;
- char* outBuff;
- size_t outBuffSize;
- size_t outStart;
- size_t outEnd;
- size_t lhSize;
- void* legacyContext;
- U32 previousLegacyVersion;
- U32 legacyVersion;
- U32 hostageByte;
- int noForwardProgress;
-
- /* workspace */
- BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
- BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
- }; /* typedef'd to ZSTD_DCtx within "zstd.h" */
-
  size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
  {
  if (dctx==NULL) return 0; /* support sizeof NULL */
@@ -192,8 +89,8 @@ size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
  static size_t ZSTD_startingInputLength(ZSTD_format_e format)
  {
  size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
- ZSTD_frameHeaderSize_prefix - ZSTD_FRAMEIDSIZE :
- ZSTD_frameHeaderSize_prefix;
+ ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
+ ZSTD_FRAMEHEADERSIZE_PREFIX;
  ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
  /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
  assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
@@ -290,7 +187,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
  if (size < ZSTD_FRAMEIDSIZE) return 0;
  { U32 const magic = MEM_readLE32(buffer);
  if (magic == ZSTD_MAGICNUMBER) return 1;
- if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
  }
  #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
  if (ZSTD_isLegacy(buffer, size)) return 1;
@@ -345,10 +242,10 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s

  if ( (format != ZSTD_f_zstd1_magicless)
  && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
- if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
  /* skippable frame */
- if (srcSize < ZSTD_skippableHeaderSize)
- return ZSTD_skippableHeaderSize; /* magic number + frame length */
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
  memset(zfhPtr, 0, sizeof(*zfhPtr));
  zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
  zfhPtr->frameType = ZSTD_skippableFrame;
@@ -446,6 +343,21 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
  } }
  }

+ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+ {
+ size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+ U32 sizeU32;
+
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ERROR(srcSize_wrong);
+
+ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+ if ((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32)
+ return ERROR(frameParameter_unsupported);
+
+ return skippableHeaderSize + sizeU32;
+ }
+
  /** ZSTD_findDecompressedSize() :
  * compatible with legacy mode
  * `srcSize` must be the exact length of some number of ZSTD compressed and/or
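readSkippableFrameSize() centralizes the parsing of skippable frames: a 4-byte magic in the 0x184D2A50..0x184D2A5F range, a 4-byte little-endian content size, then that many opaque bytes, for a total of ZSTD_SKIPPABLEHEADERSIZE (8) plus the content size, rejected when that addition overflows. A small sketch of the matching writer side, under the assumption of that layout (the constant and function names below are illustrative, not library API):

    #include <stdint.h>
    #include <string.h>

    #define SKIPPABLE_MAGIC_START 0x184D2A50U  /* low 4 bits are free for the user */
    #define SKIPPABLE_HEADER_SIZE 8U           /* 4-byte magic + 4-byte LE size */

    /* Emit one skippable frame wrapping userSize bytes of opaque data.
     * Returns the total frame size, or 0 if dst is too small. */
    static size_t write_skippable_frame(uint8_t* dst, size_t dstCapacity,
                                        const void* userData, uint32_t userSize)
    {
        size_t const frameSize = SKIPPABLE_HEADER_SIZE + (size_t)userSize;
        if (dstCapacity < frameSize) return 0;
        for (int i = 0; i < 4; i++) dst[i]     = (uint8_t)(SKIPPABLE_MAGIC_START >> (8*i));
        for (int i = 0; i < 4; i++) dst[4 + i] = (uint8_t)(userSize >> (8*i));
        memcpy(dst + SKIPPABLE_HEADER_SIZE, userData, userSize);
        return frameSize;
    }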
@@ -455,15 +367,13 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
  {
  unsigned long long totalDstSize = 0;

- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+ while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
  U32 const magicNumber = MEM_readLE32(src);

- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- size_t skippableSize;
- if (srcSize < ZSTD_skippableHeaderSize)
- return ERROR(srcSize_wrong);
- skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_FRAMEIDSIZE)
- + ZSTD_skippableHeaderSize;
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize))
+ return skippableSize;
  if (srcSize < skippableSize) {
  return ZSTD_CONTENTSIZE_ERROR;
  }
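ZSTD_findDecompressedSize() (part of the ZSTD_STATIC_LINKING_ONLY API) walks every frame in the source, and now delegates skippable-frame sizing to the helper above. A minimal sketch of how a caller might use it to size a destination buffer (the wrapper name is illustrative):

    #define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_findDecompressedSize() is advanced API */
    #include <stdlib.h>
    #include <zstd.h>

    /* Allocate a buffer large enough for the concatenated frames in src,
     * or return NULL when the total size is unknown or the input is invalid
     * (in which case streaming decompression is the fallback). */
    static void* alloc_for_decompressed(const void* src, size_t srcSize, size_t* outSize)
    {
        unsigned long long const total = ZSTD_findDecompressedSize(src, srcSize);
        if (total == ZSTD_CONTENTSIZE_UNKNOWN || total == ZSTD_CONTENTSIZE_ERROR) return NULL;
        *outSize = (size_t)total;
        return malloc(total ? (size_t)total : 1);
    }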
@@ -496,9 +406,9 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
  }

  /** ZSTD_getDecompressedSize() :
- * compatible with legacy mode
- * @return : decompressed size if known, 0 otherwise
- note : 0 can mean any of the following :
+ * compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
  - frame content is empty
  - decompressed size field is not present in frame header
  - frame header unknown / not supported
@@ -512,8 +422,8 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)


  /** ZSTD_decodeFrameHeader() :
- * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
- * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
  static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
  {
  size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
@@ -526,1275 +436,6 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
526
436
  }
527
437
 
528
438
 
529
- /*-*************************************************************
530
- * Block decoding
531
- ***************************************************************/
532
-
533
- /*! ZSTD_getcBlockSize() :
534
- * Provides the size of compressed block from block header `src` */
535
- size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
536
- blockProperties_t* bpPtr)
537
- {
538
- if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
539
- { U32 const cBlockHeader = MEM_readLE24(src);
540
- U32 const cSize = cBlockHeader >> 3;
541
- bpPtr->lastBlock = cBlockHeader & 1;
542
- bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
543
- bpPtr->origSize = cSize; /* only useful for RLE */
544
- if (bpPtr->blockType == bt_rle) return 1;
545
- if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
546
- return cSize;
547
- }
548
- }
549
-
550
-
551
- static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
552
- const void* src, size_t srcSize)
553
- {
554
- if (dst==NULL) return ERROR(dstSize_tooSmall);
555
- if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
556
- memcpy(dst, src, srcSize);
557
- return srcSize;
558
- }
559
-
560
-
561
- static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
562
- const void* src, size_t srcSize,
563
- size_t regenSize)
564
- {
565
- if (srcSize != 1) return ERROR(srcSize_wrong);
566
- if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
567
- memset(dst, *(const BYTE*)src, regenSize);
568
- return regenSize;
569
- }
570
-
571
- /* Hidden declaration for fullbench */
572
- size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
573
- const void* src, size_t srcSize);
574
- /*! ZSTD_decodeLiteralsBlock() :
575
- * @return : nb of bytes read from src (< srcSize )
576
- * note : symbol not declared but exposed for fullbench */
577
- size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
578
- const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
579
- {
580
- if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
581
-
582
- { const BYTE* const istart = (const BYTE*) src;
583
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
584
-
585
- switch(litEncType)
586
- {
587
- case set_repeat:
588
- if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
589
- /* fall-through */
590
-
591
- case set_compressed:
592
- if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
593
- { size_t lhSize, litSize, litCSize;
594
- U32 singleStream=0;
595
- U32 const lhlCode = (istart[0] >> 2) & 3;
596
- U32 const lhc = MEM_readLE32(istart);
597
- switch(lhlCode)
598
- {
599
- case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
600
- /* 2 - 2 - 10 - 10 */
601
- singleStream = !lhlCode;
602
- lhSize = 3;
603
- litSize = (lhc >> 4) & 0x3FF;
604
- litCSize = (lhc >> 14) & 0x3FF;
605
- break;
606
- case 2:
607
- /* 2 - 2 - 14 - 14 */
608
- lhSize = 4;
609
- litSize = (lhc >> 4) & 0x3FFF;
610
- litCSize = lhc >> 18;
611
- break;
612
- case 3:
613
- /* 2 - 2 - 18 - 18 */
614
- lhSize = 5;
615
- litSize = (lhc >> 4) & 0x3FFFF;
616
- litCSize = (lhc >> 22) + (istart[4] << 10);
617
- break;
618
- }
619
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
620
- if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
621
-
622
- /* prefetch huffman table if cold */
623
- if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
624
- PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
625
- }
626
-
627
- if (HUF_isError((litEncType==set_repeat) ?
628
- ( singleStream ?
629
- HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
630
- HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
631
- ( singleStream ?
632
- HUF_decompress1X1_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
633
- dctx->workspace, sizeof(dctx->workspace), dctx->bmi2) :
634
- HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
635
- dctx->workspace, sizeof(dctx->workspace), dctx->bmi2))))
636
- return ERROR(corruption_detected);
637
-
638
- dctx->litPtr = dctx->litBuffer;
639
- dctx->litSize = litSize;
640
- dctx->litEntropy = 1;
641
- if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
642
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
643
- return litCSize + lhSize;
644
- }
645
-
646
- case set_basic:
647
- { size_t litSize, lhSize;
648
- U32 const lhlCode = ((istart[0]) >> 2) & 3;
649
- switch(lhlCode)
650
- {
651
- case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
652
- lhSize = 1;
653
- litSize = istart[0] >> 3;
654
- break;
655
- case 1:
656
- lhSize = 2;
657
- litSize = MEM_readLE16(istart) >> 4;
658
- break;
659
- case 3:
660
- lhSize = 3;
661
- litSize = MEM_readLE24(istart) >> 4;
662
- break;
663
- }
664
-
665
- if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
666
- if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
667
- memcpy(dctx->litBuffer, istart+lhSize, litSize);
668
- dctx->litPtr = dctx->litBuffer;
669
- dctx->litSize = litSize;
670
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
671
- return lhSize+litSize;
672
- }
673
- /* direct reference into compressed stream */
674
- dctx->litPtr = istart+lhSize;
675
- dctx->litSize = litSize;
676
- return lhSize+litSize;
677
- }
678
-
679
- case set_rle:
680
- { U32 const lhlCode = ((istart[0]) >> 2) & 3;
681
- size_t litSize, lhSize;
682
- switch(lhlCode)
683
- {
684
- case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
685
- lhSize = 1;
686
- litSize = istart[0] >> 3;
687
- break;
688
- case 1:
689
- lhSize = 2;
690
- litSize = MEM_readLE16(istart) >> 4;
691
- break;
692
- case 3:
693
- lhSize = 3;
694
- litSize = MEM_readLE24(istart) >> 4;
695
- if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
696
- break;
697
- }
698
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
699
- memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
700
- dctx->litPtr = dctx->litBuffer;
701
- dctx->litSize = litSize;
702
- return lhSize+1;
703
- }
704
- default:
705
- return ERROR(corruption_detected); /* impossible */
706
- }
707
- }
708
- }
709
-
710
- /* Default FSE distribution tables.
711
- * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
712
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
713
- * They were generated programmatically with following method :
714
- * - start from default distributions, present in /lib/common/zstd_internal.h
715
- * - generate tables normally, using ZSTD_buildFSETable()
716
- * - printout the content of tables
717
- * - pretify output, report below, test with fuzzer to ensure it's correct */
718
-
719
- /* Default FSE distribution table for Literal Lengths */
720
- static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
721
- { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
722
- /* nextState, nbAddBits, nbBits, baseVal */
723
- { 0, 0, 4, 0}, { 16, 0, 4, 0},
724
- { 32, 0, 5, 1}, { 0, 0, 5, 3},
725
- { 0, 0, 5, 4}, { 0, 0, 5, 6},
726
- { 0, 0, 5, 7}, { 0, 0, 5, 9},
727
- { 0, 0, 5, 10}, { 0, 0, 5, 12},
728
- { 0, 0, 6, 14}, { 0, 1, 5, 16},
729
- { 0, 1, 5, 20}, { 0, 1, 5, 22},
730
- { 0, 2, 5, 28}, { 0, 3, 5, 32},
731
- { 0, 4, 5, 48}, { 32, 6, 5, 64},
732
- { 0, 7, 5, 128}, { 0, 8, 6, 256},
733
- { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
734
- { 32, 0, 4, 0}, { 0, 0, 4, 1},
735
- { 0, 0, 5, 2}, { 32, 0, 5, 4},
736
- { 0, 0, 5, 5}, { 32, 0, 5, 7},
737
- { 0, 0, 5, 8}, { 32, 0, 5, 10},
738
- { 0, 0, 5, 11}, { 0, 0, 6, 13},
739
- { 32, 1, 5, 16}, { 0, 1, 5, 18},
740
- { 32, 1, 5, 22}, { 0, 2, 5, 24},
741
- { 32, 3, 5, 32}, { 0, 3, 5, 40},
742
- { 0, 6, 4, 64}, { 16, 6, 4, 64},
743
- { 32, 7, 5, 128}, { 0, 9, 6, 512},
744
- { 0, 11, 6, 2048}, { 48, 0, 4, 0},
745
- { 16, 0, 4, 1}, { 32, 0, 5, 2},
746
- { 32, 0, 5, 3}, { 32, 0, 5, 5},
747
- { 32, 0, 5, 6}, { 32, 0, 5, 8},
748
- { 32, 0, 5, 9}, { 32, 0, 5, 11},
749
- { 32, 0, 5, 12}, { 0, 0, 6, 15},
750
- { 32, 1, 5, 18}, { 32, 1, 5, 20},
751
- { 32, 2, 5, 24}, { 32, 2, 5, 28},
752
- { 32, 3, 5, 40}, { 32, 4, 5, 48},
753
- { 0, 16, 6,65536}, { 0, 15, 6,32768},
754
- { 0, 14, 6,16384}, { 0, 13, 6, 8192},
755
- }; /* LL_defaultDTable */
756
-
757
- /* Default FSE distribution table for Offset Codes */
758
- static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
759
- { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
760
- /* nextState, nbAddBits, nbBits, baseVal */
761
- { 0, 0, 5, 0}, { 0, 6, 4, 61},
762
- { 0, 9, 5, 509}, { 0, 15, 5,32765},
763
- { 0, 21, 5,2097149}, { 0, 3, 5, 5},
764
- { 0, 7, 4, 125}, { 0, 12, 5, 4093},
765
- { 0, 18, 5,262141}, { 0, 23, 5,8388605},
766
- { 0, 5, 5, 29}, { 0, 8, 4, 253},
767
- { 0, 14, 5,16381}, { 0, 20, 5,1048573},
768
- { 0, 2, 5, 1}, { 16, 7, 4, 125},
769
- { 0, 11, 5, 2045}, { 0, 17, 5,131069},
770
- { 0, 22, 5,4194301}, { 0, 4, 5, 13},
771
- { 16, 8, 4, 253}, { 0, 13, 5, 8189},
772
- { 0, 19, 5,524285}, { 0, 1, 5, 1},
773
- { 16, 6, 4, 61}, { 0, 10, 5, 1021},
774
- { 0, 16, 5,65533}, { 0, 28, 5,268435453},
775
- { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
776
- { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
777
- }; /* OF_defaultDTable */
778
-
779
-
780
- /* Default FSE distribution table for Match Lengths */
781
- static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
782
- { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
783
- /* nextState, nbAddBits, nbBits, baseVal */
784
- { 0, 0, 6, 3}, { 0, 0, 4, 4},
785
- { 32, 0, 5, 5}, { 0, 0, 5, 6},
786
- { 0, 0, 5, 8}, { 0, 0, 5, 9},
787
- { 0, 0, 5, 11}, { 0, 0, 6, 13},
788
- { 0, 0, 6, 16}, { 0, 0, 6, 19},
789
- { 0, 0, 6, 22}, { 0, 0, 6, 25},
790
- { 0, 0, 6, 28}, { 0, 0, 6, 31},
791
- { 0, 0, 6, 34}, { 0, 1, 6, 37},
792
- { 0, 1, 6, 41}, { 0, 2, 6, 47},
793
- { 0, 3, 6, 59}, { 0, 4, 6, 83},
794
- { 0, 7, 6, 131}, { 0, 9, 6, 515},
795
- { 16, 0, 4, 4}, { 0, 0, 4, 5},
796
- { 32, 0, 5, 6}, { 0, 0, 5, 7},
797
- { 32, 0, 5, 9}, { 0, 0, 5, 10},
798
- { 0, 0, 6, 12}, { 0, 0, 6, 15},
799
- { 0, 0, 6, 18}, { 0, 0, 6, 21},
800
- { 0, 0, 6, 24}, { 0, 0, 6, 27},
801
- { 0, 0, 6, 30}, { 0, 0, 6, 33},
802
- { 0, 1, 6, 35}, { 0, 1, 6, 39},
803
- { 0, 2, 6, 43}, { 0, 3, 6, 51},
804
- { 0, 4, 6, 67}, { 0, 5, 6, 99},
805
- { 0, 8, 6, 259}, { 32, 0, 4, 4},
806
- { 48, 0, 4, 4}, { 16, 0, 4, 5},
807
- { 32, 0, 5, 7}, { 32, 0, 5, 8},
808
- { 32, 0, 5, 10}, { 32, 0, 5, 11},
809
- { 0, 0, 6, 14}, { 0, 0, 6, 17},
810
- { 0, 0, 6, 20}, { 0, 0, 6, 23},
811
- { 0, 0, 6, 26}, { 0, 0, 6, 29},
812
- { 0, 0, 6, 32}, { 0, 16, 6,65539},
813
- { 0, 15, 6,32771}, { 0, 14, 6,16387},
814
- { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
815
- { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
816
- }; /* ML_defaultDTable */
817
-
818
-
819
- static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
820
- {
821
- void* ptr = dt;
822
- ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
823
- ZSTD_seqSymbol* const cell = dt + 1;
824
-
825
- DTableH->tableLog = 0;
826
- DTableH->fastMode = 0;
827
-
828
- cell->nbBits = 0;
829
- cell->nextState = 0;
830
- assert(nbAddBits < 255);
831
- cell->nbAdditionalBits = (BYTE)nbAddBits;
832
- cell->baseValue = baseValue;
833
- }
834
-
835
-
836
- /* ZSTD_buildFSETable() :
837
- * generate FSE decoding table for one symbol (ll, ml or off) */
838
- static void
839
- ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
840
- const short* normalizedCounter, unsigned maxSymbolValue,
841
- const U32* baseValue, const U32* nbAdditionalBits,
842
- unsigned tableLog)
843
- {
844
- ZSTD_seqSymbol* const tableDecode = dt+1;
845
- U16 symbolNext[MaxSeq+1];
846
-
847
- U32 const maxSV1 = maxSymbolValue + 1;
848
- U32 const tableSize = 1 << tableLog;
849
- U32 highThreshold = tableSize-1;
850
-
851
- /* Sanity Checks */
852
- assert(maxSymbolValue <= MaxSeq);
853
- assert(tableLog <= MaxFSELog);
854
-
855
- /* Init, lay down lowprob symbols */
856
- { ZSTD_seqSymbol_header DTableH;
857
- DTableH.tableLog = tableLog;
858
- DTableH.fastMode = 1;
859
- { S16 const largeLimit= (S16)(1 << (tableLog-1));
860
- U32 s;
861
- for (s=0; s<maxSV1; s++) {
862
- if (normalizedCounter[s]==-1) {
863
- tableDecode[highThreshold--].baseValue = s;
864
- symbolNext[s] = 1;
865
- } else {
866
- if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
867
- symbolNext[s] = normalizedCounter[s];
868
- } } }
869
- memcpy(dt, &DTableH, sizeof(DTableH));
870
- }
871
-
872
- /* Spread symbols */
873
- { U32 const tableMask = tableSize-1;
874
- U32 const step = FSE_TABLESTEP(tableSize);
875
- U32 s, position = 0;
876
- for (s=0; s<maxSV1; s++) {
877
- int i;
878
- for (i=0; i<normalizedCounter[s]; i++) {
879
- tableDecode[position].baseValue = s;
880
- position = (position + step) & tableMask;
881
- while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
882
- } }
883
- assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
884
- }
885
-
886
- /* Build Decoding table */
887
- { U32 u;
888
- for (u=0; u<tableSize; u++) {
889
- U32 const symbol = tableDecode[u].baseValue;
890
- U32 const nextState = symbolNext[symbol]++;
891
- tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
892
- tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
893
- assert(nbAdditionalBits[symbol] < 255);
894
- tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
895
- tableDecode[u].baseValue = baseValue[symbol];
896
- } }
897
- }
898
-
899
-
900
- /*! ZSTD_buildSeqTable() :
901
- * @return : nb bytes read from src,
902
- * or an error code if it fails */
903
- static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
904
- symbolEncodingType_e type, U32 max, U32 maxLog,
905
- const void* src, size_t srcSize,
906
- const U32* baseValue, const U32* nbAdditionalBits,
907
- const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
908
- int ddictIsCold, int nbSeq)
909
- {
910
- switch(type)
911
- {
912
- case set_rle :
913
- if (!srcSize) return ERROR(srcSize_wrong);
914
- if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
915
- { U32 const symbol = *(const BYTE*)src;
916
- U32 const baseline = baseValue[symbol];
917
- U32 const nbBits = nbAdditionalBits[symbol];
918
- ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
919
- }
920
- *DTablePtr = DTableSpace;
921
- return 1;
922
- case set_basic :
923
- *DTablePtr = defaultTable;
924
- return 0;
925
- case set_repeat:
926
- if (!flagRepeatTable) return ERROR(corruption_detected);
927
- /* prefetch FSE table if used */
928
- if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
929
- const void* const pStart = *DTablePtr;
930
- size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
931
- PREFETCH_AREA(pStart, pSize);
932
- }
933
- return 0;
934
- case set_compressed :
935
- { U32 tableLog;
936
- S16 norm[MaxSeq+1];
937
- size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
938
- if (FSE_isError(headerSize)) return ERROR(corruption_detected);
939
- if (tableLog > maxLog) return ERROR(corruption_detected);
940
- ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
941
- *DTablePtr = DTableSpace;
942
- return headerSize;
943
- }
944
- default : /* impossible */
945
- assert(0);
946
- return ERROR(GENERIC);
947
- }
948
- }
949
-
950
- static const U32 LL_base[MaxLL+1] = {
951
- 0, 1, 2, 3, 4, 5, 6, 7,
952
- 8, 9, 10, 11, 12, 13, 14, 15,
953
- 16, 18, 20, 22, 24, 28, 32, 40,
954
- 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
955
- 0x2000, 0x4000, 0x8000, 0x10000 };
956
-
957
- static const U32 OF_base[MaxOff+1] = {
958
- 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
959
- 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
960
- 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
961
- 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
962
-
963
- static const U32 OF_bits[MaxOff+1] = {
964
- 0, 1, 2, 3, 4, 5, 6, 7,
965
- 8, 9, 10, 11, 12, 13, 14, 15,
966
- 16, 17, 18, 19, 20, 21, 22, 23,
967
- 24, 25, 26, 27, 28, 29, 30, 31 };
968
-
969
- static const U32 ML_base[MaxML+1] = {
970
- 3, 4, 5, 6, 7, 8, 9, 10,
971
- 11, 12, 13, 14, 15, 16, 17, 18,
972
- 19, 20, 21, 22, 23, 24, 25, 26,
973
- 27, 28, 29, 30, 31, 32, 33, 34,
974
- 35, 37, 39, 41, 43, 47, 51, 59,
975
- 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
976
- 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
977
-
978
- /* Hidden delcaration for fullbench */
979
- size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
980
- const void* src, size_t srcSize);
981
-
982
- size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
983
- const void* src, size_t srcSize)
984
- {
985
- const BYTE* const istart = (const BYTE* const)src;
986
- const BYTE* const iend = istart + srcSize;
987
- const BYTE* ip = istart;
988
- int nbSeq;
989
- DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
990
-
991
- /* check */
992
- if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
993
-
994
- /* SeqHead */
995
- nbSeq = *ip++;
996
- if (!nbSeq) { *nbSeqPtr=0; return 1; }
997
- if (nbSeq > 0x7F) {
998
- if (nbSeq == 0xFF) {
999
- if (ip+2 > iend) return ERROR(srcSize_wrong);
1000
- nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
1001
- } else {
1002
- if (ip >= iend) return ERROR(srcSize_wrong);
1003
- nbSeq = ((nbSeq-0x80)<<8) + *ip++;
1004
- }
1005
- }
1006
- *nbSeqPtr = nbSeq;
1007
-
1008
- /* FSE table descriptors */
1009
- if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
1010
- { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
1011
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
1012
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
1013
- ip++;
1014
-
1015
- /* Build DTables */
1016
- { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
1017
- LLtype, MaxLL, LLFSELog,
1018
- ip, iend-ip,
1019
- LL_base, LL_bits,
1020
- LL_defaultDTable, dctx->fseEntropy,
1021
- dctx->ddictIsCold, nbSeq);
1022
- if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
1023
- ip += llhSize;
1024
- }
1025
-
1026
- { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
1027
- OFtype, MaxOff, OffFSELog,
1028
- ip, iend-ip,
1029
- OF_base, OF_bits,
1030
- OF_defaultDTable, dctx->fseEntropy,
1031
- dctx->ddictIsCold, nbSeq);
1032
- if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
1033
- ip += ofhSize;
1034
- }
1035
-
1036
- { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
1037
- MLtype, MaxML, MLFSELog,
1038
- ip, iend-ip,
1039
- ML_base, ML_bits,
1040
- ML_defaultDTable, dctx->fseEntropy,
1041
- dctx->ddictIsCold, nbSeq);
1042
- if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
1043
- ip += mlhSize;
1044
- }
1045
- }
1046
-
1047
- /* prefetch dictionary content */
1048
- if (dctx->ddictIsCold) {
1049
- size_t const dictSize = (const char*)dctx->prefixStart - (const char*)dctx->virtualStart;
1050
- size_t const psmin = MIN(dictSize, (size_t)(64*nbSeq) /* heuristic */ );
1051
- size_t const pSize = MIN(psmin, 128 KB /* protection */ );
1052
- const void* const pStart = (const char*)dctx->dictEnd - pSize;
1053
- PREFETCH_AREA(pStart, pSize);
1054
- dctx->ddictIsCold = 0;
1055
- }
1056
-
1057
- return ip-istart;
1058
- }
1059
-
1060
-
1061
- typedef struct {
1062
- size_t litLength;
1063
- size_t matchLength;
1064
- size_t offset;
1065
- const BYTE* match;
1066
- } seq_t;
1067
-
1068
- typedef struct {
1069
- size_t state;
1070
- const ZSTD_seqSymbol* table;
1071
- } ZSTD_fseState;
1072
-
1073
- typedef struct {
1074
- BIT_DStream_t DStream;
1075
- ZSTD_fseState stateLL;
1076
- ZSTD_fseState stateOffb;
1077
- ZSTD_fseState stateML;
1078
- size_t prevOffset[ZSTD_REP_NUM];
1079
- const BYTE* prefixStart;
1080
- const BYTE* dictEnd;
1081
- size_t pos;
1082
- } seqState_t;
1083
-
1084
-
1085
- FORCE_NOINLINE
1086
- size_t ZSTD_execSequenceLast7(BYTE* op,
1087
- BYTE* const oend, seq_t sequence,
1088
- const BYTE** litPtr, const BYTE* const litLimit,
1089
- const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
1090
- {
1091
- BYTE* const oLitEnd = op + sequence.litLength;
1092
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1093
- BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1094
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1095
- const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1096
- const BYTE* match = oLitEnd - sequence.offset;
1097
-
1098
- /* check */
1099
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1100
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
1101
- if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */
1102
-
1103
- /* copy literals */
1104
- if (op < oend_w) {
1105
- ZSTD_wildcopy(op, *litPtr, oend_w - op);
1106
- *litPtr += oend_w - op;
1107
- op = oend_w;
1108
- }
1109
- while (op < oLitEnd) *op++ = *(*litPtr)++;
1110
-
1111
- /* copy Match */
1112
- if (sequence.offset > (size_t)(oLitEnd - base)) {
1113
- /* offset beyond prefix */
1114
- if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
1115
- match = dictEnd - (base-match);
1116
- if (match + sequence.matchLength <= dictEnd) {
1117
- memmove(oLitEnd, match, sequence.matchLength);
1118
- return sequenceLength;
1119
- }
1120
- /* span extDict & currentPrefixSegment */
1121
- { size_t const length1 = dictEnd - match;
1122
- memmove(oLitEnd, match, length1);
1123
- op = oLitEnd + length1;
1124
- sequence.matchLength -= length1;
1125
- match = base;
1126
- } }
1127
- while (op < oMatchEnd) *op++ = *match++;
1128
- return sequenceLength;
1129
- }
1130
-
1131
-
1132
- HINT_INLINE
1133
- size_t ZSTD_execSequence(BYTE* op,
1134
- BYTE* const oend, seq_t sequence,
1135
- const BYTE** litPtr, const BYTE* const litLimit,
1136
- const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
1137
- {
1138
- BYTE* const oLitEnd = op + sequence.litLength;
1139
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1140
- BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1141
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1142
- const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1143
- const BYTE* match = oLitEnd - sequence.offset;
1144
-
1145
- /* check */
1146
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1147
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
1148
- if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
1149
-
1150
- /* copy Literals */
1151
- ZSTD_copy8(op, *litPtr);
1152
- if (sequence.litLength > 8)
1153
- ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
1154
- op = oLitEnd;
1155
- *litPtr = iLitEnd; /* update for next sequence */
1156
-
1157
- /* copy Match */
1158
- if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
1159
- /* offset beyond prefix -> go into extDict */
1160
- if (sequence.offset > (size_t)(oLitEnd - virtualStart))
1161
- return ERROR(corruption_detected);
1162
- match = dictEnd + (match - prefixStart);
1163
- if (match + sequence.matchLength <= dictEnd) {
1164
- memmove(oLitEnd, match, sequence.matchLength);
1165
- return sequenceLength;
1166
- }
1167
- /* span extDict & currentPrefixSegment */
1168
- { size_t const length1 = dictEnd - match;
1169
- memmove(oLitEnd, match, length1);
1170
- op = oLitEnd + length1;
1171
- sequence.matchLength -= length1;
1172
- match = prefixStart;
1173
- if (op > oend_w || sequence.matchLength < MINMATCH) {
1174
- U32 i;
1175
- for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
1176
- return sequenceLength;
1177
- }
1178
- } }
1179
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
1180
-
1181
- /* match within prefix */
1182
- if (sequence.offset < 8) {
1183
- /* close range match, overlap */
1184
- static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
1185
- static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
1186
- int const sub2 = dec64table[sequence.offset];
1187
- op[0] = match[0];
1188
- op[1] = match[1];
1189
- op[2] = match[2];
1190
- op[3] = match[3];
1191
- match += dec32table[sequence.offset];
1192
- ZSTD_copy4(op+4, match);
1193
- match -= sub2;
1194
- } else {
1195
- ZSTD_copy8(op, match);
1196
- }
1197
- op += 8; match += 8;
1198
-
1199
- if (oMatchEnd > oend-(16-MINMATCH)) {
1200
- if (op < oend_w) {
1201
- ZSTD_wildcopy(op, match, oend_w - op);
1202
- match += oend_w - op;
1203
- op = oend_w;
1204
- }
1205
- while (op < oMatchEnd) *op++ = *match++;
1206
- } else {
1207
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
1208
- }
1209
- return sequenceLength;
1210
- }
1211
-
1212
-
1213
- HINT_INLINE
1214
- size_t ZSTD_execSequenceLong(BYTE* op,
1215
- BYTE* const oend, seq_t sequence,
1216
- const BYTE** litPtr, const BYTE* const litLimit,
1217
- const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
1218
- {
1219
- BYTE* const oLitEnd = op + sequence.litLength;
1220
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1221
- BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1222
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1223
- const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1224
- const BYTE* match = sequence.match;
1225
-
1226
- /* check */
1227
- if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1228
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
1229
- if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
1230
-
1231
- /* copy Literals */
1232
- ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
1233
- if (sequence.litLength > 8)
1234
- ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
1235
- op = oLitEnd;
1236
- *litPtr = iLitEnd; /* update for next sequence */
1237
-
1238
- /* copy Match */
1239
- if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
1240
- /* offset beyond prefix */
1241
- if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
1242
- if (match + sequence.matchLength <= dictEnd) {
1243
- memmove(oLitEnd, match, sequence.matchLength);
1244
- return sequenceLength;
1245
- }
1246
- /* span extDict & currentPrefixSegment */
1247
- { size_t const length1 = dictEnd - match;
1248
- memmove(oLitEnd, match, length1);
1249
- op = oLitEnd + length1;
1250
- sequence.matchLength -= length1;
1251
- match = prefixStart;
1252
- if (op > oend_w || sequence.matchLength < MINMATCH) {
1253
- U32 i;
1254
- for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
1255
- return sequenceLength;
1256
- }
1257
- } }
1258
- assert(op <= oend_w);
1259
- assert(sequence.matchLength >= MINMATCH);
1260
-
1261
- /* match within prefix */
1262
- if (sequence.offset < 8) {
1263
- /* close range match, overlap */
1264
- static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
1265
- static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
1266
- int const sub2 = dec64table[sequence.offset];
1267
- op[0] = match[0];
1268
- op[1] = match[1];
1269
- op[2] = match[2];
1270
- op[3] = match[3];
1271
- match += dec32table[sequence.offset];
1272
- ZSTD_copy4(op+4, match);
1273
- match -= sub2;
1274
- } else {
1275
- ZSTD_copy8(op, match);
1276
- }
1277
- op += 8; match += 8;
1278
-
1279
- if (oMatchEnd > oend-(16-MINMATCH)) {
1280
- if (op < oend_w) {
1281
- ZSTD_wildcopy(op, match, oend_w - op);
1282
- match += oend_w - op;
1283
- op = oend_w;
1284
- }
1285
- while (op < oMatchEnd) *op++ = *match++;
1286
- } else {
1287
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
1288
- }
1289
- return sequenceLength;
1290
- }
1291
-
1292
- static void
1293
- ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
1294
- {
1295
- const void* ptr = dt;
1296
- const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
1297
- DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
1298
- DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
1299
- (U32)DStatePtr->state, DTableH->tableLog);
1300
- BIT_reloadDStream(bitD);
1301
- DStatePtr->table = dt + 1;
1302
- }
1303
-
1304
- FORCE_INLINE_TEMPLATE void
1305
- ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
1306
- {
1307
- ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
1308
- U32 const nbBits = DInfo.nbBits;
1309
- size_t const lowBits = BIT_readBits(bitD, nbBits);
1310
- DStatePtr->state = DInfo.nextState + lowBits;
1311
- }
1312
-
1313
- /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
1314
- * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
1315
- * bits before reloading. This value is the maximum number of bytes we read
1316
- * after reloading when we are decoding long offets.
1317
- */
1318
- #define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
1319
- (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
1320
- ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
1321
- : 0)
1322
-
1323
- typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
1324
-
1325
- FORCE_INLINE_TEMPLATE seq_t
1326
- ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
1327
- {
1328
- seq_t seq;
1329
- U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
1330
- U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
1331
- U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
1332
- U32 const totalBits = llBits+mlBits+ofBits;
1333
- U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
1334
- U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
1335
- U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
1336
-
1337
- /* sequence */
1338
- { size_t offset;
1339
- if (!ofBits)
1340
- offset = 0;
1341
- else {
1342
- ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
1343
- ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
1344
- assert(ofBits <= MaxOff);
1345
- if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
1346
- U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
1347
- offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
1348
- BIT_reloadDStream(&seqState->DStream);
1349
- if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
1350
- assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
1351
- } else {
1352
- offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
1353
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
1354
- }
1355
- }
1356
-
1357
- if (ofBits <= 1) {
1358
- offset += (llBase==0);
1359
- if (offset) {
1360
- size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
1361
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
1362
- if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
1363
- seqState->prevOffset[1] = seqState->prevOffset[0];
1364
- seqState->prevOffset[0] = offset = temp;
1365
- } else { /* offset == 0 */
1366
- offset = seqState->prevOffset[0];
1367
- }
1368
- } else {
1369
- seqState->prevOffset[2] = seqState->prevOffset[1];
1370
- seqState->prevOffset[1] = seqState->prevOffset[0];
1371
- seqState->prevOffset[0] = offset;
1372
- }
1373
- seq.offset = offset;
1374
- }
1375
-
1376
- seq.matchLength = mlBase
1377
- + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */
1378
- if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
1379
- BIT_reloadDStream(&seqState->DStream);
1380
- if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
1381
- BIT_reloadDStream(&seqState->DStream);
1382
- /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
1383
- ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
1384
-
1385
- seq.litLength = llBase
1386
- + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */
1387
- if (MEM_32bits())
1388
- BIT_reloadDStream(&seqState->DStream);
1389
-
1390
- DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
1391
- (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
1392
-
1393
- /* ANS state update */
1394
- ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
1395
- ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
1396
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
1397
- ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
1398
-
1399
- return seq;
1400
- }
1401
-
1402
- FORCE_INLINE_TEMPLATE size_t
1403
- ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
1404
- void* dst, size_t maxDstSize,
1405
- const void* seqStart, size_t seqSize, int nbSeq,
1406
- const ZSTD_longOffset_e isLongOffset)
1407
- {
1408
- const BYTE* ip = (const BYTE*)seqStart;
1409
- const BYTE* const iend = ip + seqSize;
1410
- BYTE* const ostart = (BYTE* const)dst;
1411
- BYTE* const oend = ostart + maxDstSize;
1412
- BYTE* op = ostart;
1413
- const BYTE* litPtr = dctx->litPtr;
1414
- const BYTE* const litEnd = litPtr + dctx->litSize;
1415
- const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
1416
- const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
1417
- const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1418
- DEBUGLOG(5, "ZSTD_decompressSequences_body");
1419
-
1420
- /* Regen sequences */
1421
- if (nbSeq) {
1422
- seqState_t seqState;
1423
- dctx->fseEntropy = 1;
1424
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1425
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
1426
- ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1427
- ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1428
- ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1429
-
1430
- for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
1431
- nbSeq--;
1432
- { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
1433
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
1434
- DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
1435
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1436
- op += oneSeqSize;
1437
- } }
1438
-
1439
- /* check if reached exact end */
1440
- DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
1441
- if (nbSeq) return ERROR(corruption_detected);
1442
- /* save reps for next block */
1443
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1444
- }
1445
-
1446
- /* last literal segment */
1447
- { size_t const lastLLSize = litEnd - litPtr;
1448
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
1449
- memcpy(op, litPtr, lastLLSize);
1450
- op += lastLLSize;
1451
- }
1452
-
1453
- return op-ostart;
1454
- }
1455
-
1456
- static size_t
1457
- ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
1458
- void* dst, size_t maxDstSize,
1459
- const void* seqStart, size_t seqSize, int nbSeq,
1460
- const ZSTD_longOffset_e isLongOffset)
1461
- {
1462
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1463
- }
1464
-
1465
-
1466
-
1467
- FORCE_INLINE_TEMPLATE seq_t
1468
- ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
1469
- {
1470
- seq_t seq;
1471
- U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
1472
- U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
1473
- U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
1474
- U32 const totalBits = llBits+mlBits+ofBits;
1475
- U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
1476
- U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
1477
- U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
1478
-
1479
- /* sequence */
1480
- { size_t offset;
1481
- if (!ofBits)
1482
- offset = 0;
1483
- else {
1484
- ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
1485
- ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
1486
- assert(ofBits <= MaxOff);
1487
- if (MEM_32bits() && longOffsets) {
1488
- U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
1489
- offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
1490
- if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
1491
- if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
1492
- } else {
1493
- offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
1494
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
1495
- }
1496
- }
1497
-
1498
- if (ofBits <= 1) {
1499
- offset += (llBase==0);
1500
- if (offset) {
1501
- size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
1502
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
1503
- if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
1504
- seqState->prevOffset[1] = seqState->prevOffset[0];
1505
- seqState->prevOffset[0] = offset = temp;
1506
- } else {
1507
- offset = seqState->prevOffset[0];
1508
- }
1509
- } else {
1510
- seqState->prevOffset[2] = seqState->prevOffset[1];
1511
- seqState->prevOffset[1] = seqState->prevOffset[0];
1512
- seqState->prevOffset[0] = offset;
1513
- }
1514
- seq.offset = offset;
1515
- }
1516
-
1517
- seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
1518
- if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
1519
- BIT_reloadDStream(&seqState->DStream);
1520
- if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
1521
- BIT_reloadDStream(&seqState->DStream);
1522
- /* Verify that there is enough bits to read the rest of the data in 64-bit mode. */
1523
- ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
1524
-
1525
- seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
1526
- if (MEM_32bits())
1527
- BIT_reloadDStream(&seqState->DStream);
1528
-
1529
- { size_t const pos = seqState->pos + seq.litLength;
1530
- const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
1531
- seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
1532
- * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */
1533
- seqState->pos = pos + seq.matchLength;
1534
- }
1535
-
1536
- /* ANS state update */
1537
- ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
1538
- ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
1539
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
1540
- ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
1541
-
1542
- return seq;
1543
- }
1544
-
1545
- FORCE_INLINE_TEMPLATE size_t
1546
- ZSTD_decompressSequencesLong_body(
1547
- ZSTD_DCtx* dctx,
1548
- void* dst, size_t maxDstSize,
1549
- const void* seqStart, size_t seqSize, int nbSeq,
1550
- const ZSTD_longOffset_e isLongOffset)
1551
- {
1552
- const BYTE* ip = (const BYTE*)seqStart;
1553
- const BYTE* const iend = ip + seqSize;
1554
- BYTE* const ostart = (BYTE* const)dst;
1555
- BYTE* const oend = ostart + maxDstSize;
1556
- BYTE* op = ostart;
1557
- const BYTE* litPtr = dctx->litPtr;
1558
- const BYTE* const litEnd = litPtr + dctx->litSize;
1559
- const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
1560
- const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
1561
- const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1562
-
1563
- /* Regen sequences */
1564
- if (nbSeq) {
1565
- #define STORED_SEQS 4
1566
- #define STOSEQ_MASK (STORED_SEQS-1)
1567
- #define ADVANCED_SEQS 4
1568
- seq_t sequences[STORED_SEQS];
1569
- int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
1570
- seqState_t seqState;
1571
- int seqNb;
1572
- dctx->fseEntropy = 1;
1573
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1574
- seqState.prefixStart = prefixStart;
1575
- seqState.pos = (size_t)(op-prefixStart);
1576
- seqState.dictEnd = dictEnd;
1577
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
1578
- ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1579
- ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1580
- ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1581
-
1582
- /* prepare in advance */
1583
- for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
1584
- sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
1585
- }
1586
- if (seqNb<seqAdvance) return ERROR(corruption_detected);
1587
-
1588
- /* decode and decompress */
1589
- for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
1590
- seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
1591
- size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1592
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1593
- PREFETCH(sequence.match); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
1594
- sequences[seqNb&STOSEQ_MASK] = sequence;
1595
- op += oneSeqSize;
1596
- }
1597
- if (seqNb<nbSeq) return ERROR(corruption_detected);
1598
-
1599
- /* finish queue */
1600
- seqNb -= seqAdvance;
1601
- for ( ; seqNb<nbSeq ; seqNb++) {
1602
- size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1603
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1604
- op += oneSeqSize;
1605
- }
1606
-
1607
- /* save reps for next block */
1608
- { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1609
- #undef STORED_SEQS
1610
- #undef STOSEQ_MASK
1611
- #undef ADVANCED_SEQS
1612
- }
1613
-
1614
- /* last literal segment */
1615
- { size_t const lastLLSize = litEnd - litPtr;
1616
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
1617
- memcpy(op, litPtr, lastLLSize);
1618
- op += lastLLSize;
1619
- }
1620
-
1621
- return op-ostart;
1622
- }
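
The ring buffer of STORED_SEQS decoded sequences above exists so that each sequence's match address can be prefetched several iterations before the sequence is actually executed, hiding the cache miss behind useful decoding work. A minimal, self-contained sketch of that decode-ahead pattern follows; decode_one()/execute_one() and the other names are illustrative stand-ins, not zstd functions.

#include <stdio.h>
#include <stddef.h>

#define RING 4   /* plays the role of STORED_SEQS == ADVANCED_SEQS above */

typedef struct { const char* match; size_t length; } item_t;

/* trivial stand-ins : a real decoder would read a bitstream here */
static item_t decode_one(const char** src) { item_t it; it.match = *src; it.length = 1; (*src)++; return it; }
static size_t execute_one(item_t it) { (void)it.match; return it.length; }

static size_t pipelined(const char* src, int nb)
{
    item_t ring[RING];
    size_t total = 0;
    int const advance = (nb < RING) ? nb : RING;
    int i;

    for (i = 0; i < advance; i++)                 /* prepare in advance */
        ring[i] = decode_one(&src);

    for (; i < nb; i++) {                         /* decode item i, execute item i-RING */
        item_t const next = decode_one(&src);
        total += execute_one(ring[(i - RING) & (RING - 1)]);
#if defined(__GNUC__)
        __builtin_prefetch(next.match);           /* same idea as PREFETCH(sequence.match) above */
#endif
        ring[i & (RING - 1)] = next;
    }

    for (i -= advance; i < nb; i++)               /* finish queue */
        total += execute_one(ring[i & (RING - 1)]);

    return total;
}

int main(void) { printf("%u\n", (unsigned)pipelined("hello world", 11)); return 0; }
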
1623
-
1624
- static size_t
1625
- ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
1626
- void* dst, size_t maxDstSize,
1627
- const void* seqStart, size_t seqSize, int nbSeq,
1628
- const ZSTD_longOffset_e isLongOffset)
1629
- {
1630
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1631
- }
1632
-
1633
-
1634
-
1635
- #if DYNAMIC_BMI2
1636
-
1637
- static TARGET_ATTRIBUTE("bmi2") size_t
1638
- ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
1639
- void* dst, size_t maxDstSize,
1640
- const void* seqStart, size_t seqSize, int nbSeq,
1641
- const ZSTD_longOffset_e isLongOffset)
1642
- {
1643
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1644
- }
1645
-
1646
- static TARGET_ATTRIBUTE("bmi2") size_t
1647
- ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
1648
- void* dst, size_t maxDstSize,
1649
- const void* seqStart, size_t seqSize, int nbSeq,
1650
- const ZSTD_longOffset_e isLongOffset)
1651
- {
1652
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1653
- }
1654
-
1655
- #endif
1656
-
1657
- typedef size_t (*ZSTD_decompressSequences_t)(
1658
- ZSTD_DCtx *dctx, void *dst, size_t maxDstSize,
1659
- const void *seqStart, size_t seqSize, int nbSeq,
1660
- const ZSTD_longOffset_e isLongOffset);
1661
-
1662
- static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
1663
- const void* seqStart, size_t seqSize, int nbSeq,
1664
- const ZSTD_longOffset_e isLongOffset)
1665
- {
1666
- DEBUGLOG(5, "ZSTD_decompressSequences");
1667
- #if DYNAMIC_BMI2
1668
- if (dctx->bmi2) {
1669
- return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1670
- }
1671
- #endif
1672
- return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1673
- }
1674
-
1675
- static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
1676
- void* dst, size_t maxDstSize,
1677
- const void* seqStart, size_t seqSize, int nbSeq,
1678
- const ZSTD_longOffset_e isLongOffset)
1679
- {
1680
- DEBUGLOG(5, "ZSTD_decompressSequencesLong");
1681
- #if DYNAMIC_BMI2
1682
- if (dctx->bmi2) {
1683
- return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1684
- }
1685
- #endif
1686
- return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1687
- }
1688
-
1689
- /* ZSTD_getLongOffsetsShare() :
1690
- * condition : offTable must be valid
1691
- * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
1692
- * compared to maximum possible of (1<<OffFSELog) */
1693
- static unsigned
1694
- ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
1695
- {
1696
- const void* ptr = offTable;
1697
- U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
1698
- const ZSTD_seqSymbol* table = offTable + 1;
1699
- U32 const max = 1 << tableLog;
1700
- U32 u, total = 0;
1701
- DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
1702
-
1703
- assert(max <= (1 << OffFSELog)); /* max not too large */
1704
- for (u=0; u<max; u++) {
1705
- if (table[u].nbAdditionalBits > 22) total += 1;
1706
- }
1707
-
1708
- assert(tableLog <= OffFSELog);
1709
- total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
1710
-
1711
- return total;
1712
- }
1713
-
1714
-
1715
- static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
1716
- void* dst, size_t dstCapacity,
1717
- const void* src, size_t srcSize, const int frame)
1718
- { /* blockType == blockCompressed */
1719
- const BYTE* ip = (const BYTE*)src;
1720
- /* isLongOffset must be true if there are long offsets.
1721
- * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
1722
- * We don't expect that to be the case in 64-bit mode.
1723
- * In block mode, window size is not known, so we have to be conservative.
1724
- * (note: but it could be evaluated from current-lowLimit)
1725
- */
1726
- ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
1727
- DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
1728
-
1729
- if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
1730
-
1731
- /* Decode literals section */
1732
- { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
1733
- DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
1734
- if (ZSTD_isError(litCSize)) return litCSize;
1735
- ip += litCSize;
1736
- srcSize -= litCSize;
1737
- }
1738
-
1739
- /* Build Decoding Tables */
1740
- { int nbSeq;
1741
- size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
1742
- if (ZSTD_isError(seqHSize)) return seqHSize;
1743
- ip += seqHSize;
1744
- srcSize -= seqHSize;
1745
-
1746
- if ( (!frame || dctx->fParams.windowSize > (1<<24))
1747
- && (nbSeq>0) ) { /* could probably use a larger nbSeq limit */
1748
- U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
1749
- U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
1750
- if (shareLongOffsets >= minShare)
1751
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
1752
- }
1753
-
1754
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
1755
- }
1756
- }
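
As a quick check on the heuristic just above: ZSTD_getLongOffsetsShare() scales its count to 1 << OffFSELog = 256 table cells, so the two minShare thresholds quoted in the comment work out to

    7 / 256  ≈ 2.73 %   (64-bit builds)
   20 / 256  ≈ 7.81 %   (32-bit builds)

i.e. the long-offset decoder is only selected when at least that fraction of the offset table decodes to offsets above 1 << 23.
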
1757
-
1758
-
1759
- static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
1760
- {
1761
- if (dst != dctx->previousDstEnd) { /* not contiguous */
1762
- dctx->dictEnd = dctx->previousDstEnd;
1763
- dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
1764
- dctx->prefixStart = dst;
1765
- dctx->previousDstEnd = dst;
1766
- }
1767
- }
1768
-
1769
- size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
1770
- void* dst, size_t dstCapacity,
1771
- const void* src, size_t srcSize)
1772
- {
1773
- size_t dSize;
1774
- ZSTD_checkContinuity(dctx, dst);
1775
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
1776
- dctx->previousDstEnd = (char*)dst + dSize;
1777
- return dSize;
1778
- }
1779
-
1780
-
1781
- /** ZSTD_insertBlock() :
1782
- insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
1783
- ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
1784
- {
1785
- ZSTD_checkContinuity(dctx, blockStart);
1786
- dctx->previousDstEnd = (const char*)blockStart + blockSize;
1787
- return blockSize;
1788
- }
1789
-
1790
-
1791
- static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE value, size_t length)
1792
- {
1793
- if (length > dstCapacity) return ERROR(dstSize_tooSmall);
1794
- memset(dst, value, length);
1795
- return length;
1796
- }
1797
-
1798
439
  /** ZSTD_findFrameCompressedSize() :
1799
440
  * compatible with legacy mode
1800
441
  * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
@@ -1806,9 +447,9 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
1806
447
  if (ZSTD_isLegacy(src, srcSize))
1807
448
  return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
1808
449
  #endif
1809
- if ( (srcSize >= ZSTD_skippableHeaderSize)
1810
- && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
1811
- return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
450
+ if ( (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
451
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START ) {
452
+ return readSkippableFrameSize(src, srcSize);
1812
453
  } else {
1813
454
  const BYTE* ip = (const BYTE*)src;
1814
455
  const BYTE* const ipstart = ip;
@@ -1848,8 +489,64 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
1848
489
  }
1849
490
  }
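
readSkippableFrameSize(), called above, is defined elsewhere in this file. Based on the header layout used here (a 4-byte magic in the ZSTD_MAGIC_SKIPPABLE_START range followed by a 4-byte little-endian content size), it is expected to behave roughly like the sketch below, which reuses this file's internal helpers; the _sketch suffix marks it as an illustration, not the actual helper.

static size_t readSkippableFrameSize_sketch(const void* src, size_t srcSize)
{
    if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) return ERROR(srcSize_wrong);
    {   U32 const contentSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
        return ZSTD_SKIPPABLEHEADERSIZE + (size_t)contentSize;   /* total bytes to skip */
    }
}
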
1850
491
 
492
+
493
+
494
+ /*-*************************************************************
495
+ * Frame decoding
496
+ ***************************************************************/
497
+
498
+
499
+ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
500
+ {
501
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
502
+ dctx->dictEnd = dctx->previousDstEnd;
503
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
504
+ dctx->prefixStart = dst;
505
+ dctx->previousDstEnd = dst;
506
+ }
507
+ }
508
+
509
+ /** ZSTD_insertBlock() :
510
+ insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
511
+ size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
512
+ {
513
+ ZSTD_checkContinuity(dctx, blockStart);
514
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
515
+ return blockSize;
516
+ }
517
+
518
+
519
+ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
520
+ const void* src, size_t srcSize)
521
+ {
522
+ DEBUGLOG(5, "ZSTD_copyRawBlock");
523
+ if (dst == NULL) {
524
+ if (srcSize == 0) return 0;
525
+ return ERROR(dstBuffer_null);
526
+ }
527
+ if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
528
+ memcpy(dst, src, srcSize);
529
+ return srcSize;
530
+ }
531
+
532
+ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
533
+ BYTE b,
534
+ size_t regenSize)
535
+ {
536
+ if (dst == NULL) {
537
+ if (regenSize == 0) return 0;
538
+ return ERROR(dstBuffer_null);
539
+ }
540
+ if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
541
+ memset(dst, b, regenSize);
542
+ return regenSize;
543
+ }
544
+
545
+
1851
546
  /*! ZSTD_decompressFrame() :
1852
- * @dctx must be properly initialized */
547
+ * @dctx must be properly initialized
548
+ * will update *srcPtr and *srcSizePtr,
549
+ * to make *srcPtr progress by one frame. */
1853
550
  static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1854
551
  void* dst, size_t dstCapacity,
1855
552
  const void** srcPtr, size_t *srcSizePtr)
@@ -1858,31 +555,33 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1858
555
  BYTE* const ostart = (BYTE* const)dst;
1859
556
  BYTE* const oend = ostart + dstCapacity;
1860
557
  BYTE* op = ostart;
1861
- size_t remainingSize = *srcSizePtr;
558
+ size_t remainingSrcSize = *srcSizePtr;
559
+
560
+ DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
1862
561
 
1863
562
  /* check */
1864
- if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
563
+ if (remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize)
1865
564
  return ERROR(srcSize_wrong);
1866
565
 
1867
566
  /* Frame Header */
1868
- { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
567
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
1869
568
  if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
1870
- if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
569
+ if (remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize)
1871
570
  return ERROR(srcSize_wrong);
1872
571
  CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
1873
- ip += frameHeaderSize; remainingSize -= frameHeaderSize;
572
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
1874
573
  }
1875
574
 
1876
575
  /* Loop on each block */
1877
576
  while (1) {
1878
577
  size_t decodedSize;
1879
578
  blockProperties_t blockProperties;
1880
- size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
579
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
1881
580
  if (ZSTD_isError(cBlockSize)) return cBlockSize;
1882
581
 
1883
582
  ip += ZSTD_blockHeaderSize;
1884
- remainingSize -= ZSTD_blockHeaderSize;
1885
- if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
583
+ remainingSrcSize -= ZSTD_blockHeaderSize;
584
+ if (cBlockSize > remainingSrcSize) return ERROR(srcSize_wrong);
1886
585
 
1887
586
  switch(blockProperties.blockType)
1888
587
  {
@@ -1893,7 +592,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1893
592
  decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
1894
593
  break;
1895
594
  case bt_rle :
1896
- decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
595
+ decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);
1897
596
  break;
1898
597
  case bt_reserved :
1899
598
  default:
@@ -1905,7 +604,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1905
604
  XXH64_update(&dctx->xxhState, op, decodedSize);
1906
605
  op += decodedSize;
1907
606
  ip += cBlockSize;
1908
- remainingSize -= cBlockSize;
607
+ remainingSrcSize -= cBlockSize;
1909
608
  if (blockProperties.lastBlock) break;
1910
609
  }
1911
610
 
@@ -1916,16 +615,16 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1916
615
  if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
1917
616
  U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
1918
617
  U32 checkRead;
1919
- if (remainingSize<4) return ERROR(checksum_wrong);
618
+ if (remainingSrcSize<4) return ERROR(checksum_wrong);
1920
619
  checkRead = MEM_readLE32(ip);
1921
620
  if (checkRead != checkCalc) return ERROR(checksum_wrong);
1922
621
  ip += 4;
1923
- remainingSize -= 4;
622
+ remainingSrcSize -= 4;
1924
623
  }
1925
624
 
1926
625
  /* Allow caller to get size read */
1927
626
  *srcPtr = ip;
1928
- *srcSizePtr = remainingSize;
627
+ *srcSizePtr = remainingSrcSize;
1929
628
  return op-ostart;
1930
629
  }
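
The frame-content checksum verified above is the low 32 bits of an XXH64 digest (seed 0) of the regenerated data; the streaming xxhState updates are just the incremental form of the one-shot computation sketched below. content/contentSize/storedCheck are placeholders (storedCheck would be the last 4 little-endian bytes of the frame), and U32 is this file's own integer type.

#include "xxhash.h"   /* bundled with libzstd */

static int frameChecksumMatches_sketch(const void* content, size_t contentSize, U32 storedCheck)
{
    U32 const calc = (U32)XXH64(content, contentSize, 0);   /* seed 0, keep low 4 bytes */
    return calc == storedCheck;
}
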
1931
630
 
@@ -1942,11 +641,11 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
1942
641
  assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
1943
642
 
1944
643
  if (ddict) {
1945
- dict = ZSTD_DDictDictContent(ddict);
1946
- dictSize = ZSTD_DDictDictSize(ddict);
644
+ dict = ZSTD_DDict_dictContent(ddict);
645
+ dictSize = ZSTD_DDict_dictSize(ddict);
1947
646
  }
1948
647
 
1949
- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
648
+ while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
1950
649
 
1951
650
  #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
1952
651
  if (ZSTD_isLegacy(src, srcSize)) {
@@ -1957,7 +656,9 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
1957
656
  if (dctx->staticSize) return ERROR(memory_allocation);
1958
657
 
1959
658
  decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
659
+ if (ZSTD_isError(decodedSize)) return decodedSize;
1960
660
 
661
+ assert(decodedSize <= dstCapacity);
1961
662
  dst = (BYTE*)dst + decodedSize;
1962
663
  dstCapacity -= decodedSize;
1963
664
 
@@ -1970,13 +671,11 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
1970
671
 
1971
672
  { U32 const magicNumber = MEM_readLE32(src);
1972
673
  DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
1973
- (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
1974
- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
1975
- size_t skippableSize;
1976
- if (srcSize < ZSTD_skippableHeaderSize)
1977
- return ERROR(srcSize_wrong);
1978
- skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE)
1979
- + ZSTD_skippableHeaderSize;
674
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
675
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
676
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
677
+ if (ZSTD_isError(skippableSize))
678
+ return skippableSize;
1980
679
  if (srcSize < skippableSize) return ERROR(srcSize_wrong);
1981
680
 
1982
681
  src = (const BYTE *)src + skippableSize;
@@ -2010,7 +709,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
2010
709
  return ERROR(srcSize_wrong);
2011
710
  }
2012
711
  if (ZSTD_isError(res)) return res;
2013
- /* no need to bound check, ZSTD_decompressFrame already has */
712
+ assert(res <= dstCapacity);
2014
713
  dst = (BYTE*)dst + res;
2015
714
  dstCapacity -= res;
2016
715
  }
@@ -2090,9 +789,10 @@ static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skip
2090
789
  * or an error code, which can be tested using ZSTD_isError() */
2091
790
  size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2092
791
  {
2093
- DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
792
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
2094
793
  /* Sanity check */
2095
- if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */
794
+ if (srcSize != dctx->expected)
795
+ return ERROR(srcSize_wrong); /* not allowed */
2096
796
  if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
2097
797
 
2098
798
  switch (dctx->stage)
@@ -2101,9 +801,9 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
2101
801
  assert(src != NULL);
2102
802
  if (dctx->format == ZSTD_f_zstd1) { /* allows header */
2103
803
  assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
2104
- if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
804
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
2105
805
  memcpy(dctx->headerBuffer, src, srcSize);
2106
- dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */
806
+ dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
2107
807
  dctx->stage = ZSTDds_decodeSkippableHeader;
2108
808
  return 0;
2109
809
  } }
@@ -2163,19 +863,19 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
2163
863
  rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
2164
864
  break;
2165
865
  case bt_rle :
2166
- rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
866
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
2167
867
  break;
2168
868
  case bt_reserved : /* should never happen */
2169
869
  default:
2170
870
  return ERROR(corruption_detected);
2171
871
  }
2172
872
  if (ZSTD_isError(rSize)) return rSize;
2173
- DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
873
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
2174
874
  dctx->decodedSize += rSize;
2175
875
  if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
2176
876
 
2177
877
  if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
2178
- DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
878
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
2179
879
  if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
2180
880
  if (dctx->decodedSize != dctx->fParams.frameContentSize) {
2181
881
  return ERROR(corruption_detected);
@@ -2199,7 +899,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
2199
899
  assert(srcSize == 4); /* guaranteed by dctx->expected */
2200
900
  { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
2201
901
  U32 const check32 = MEM_readLE32(src);
2202
- DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
902
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
2203
903
  if (check32 != h32) return ERROR(checksum_wrong);
2204
904
  dctx->expected = 0;
2205
905
  dctx->stage = ZSTDds_getFrameHeaderSize;
@@ -2208,8 +908,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
2208
908
 
2209
909
  case ZSTDds_decodeSkippableHeader:
2210
910
  assert(src != NULL);
2211
- assert(srcSize <= ZSTD_skippableHeaderSize);
2212
- memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */
911
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
912
+ memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
2213
913
  dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
2214
914
  dctx->stage = ZSTDds_skipFrame;
2215
915
  return 0;
@@ -2220,7 +920,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
2220
920
  return 0;
2221
921
 
2222
922
  default:
2223
- return ERROR(GENERIC); /* impossible */
923
+ assert(0); /* impossible */
924
+ return ERROR(GENERIC); /* some compilers require default to do something */
2224
925
  }
2225
926
  }
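
ZSTD_decompressContinue() above implements the buffer-less streaming contract: each call must receive exactly the byte count announced by ZSTD_nextSrcSizeToDecompress(), and returns how many bytes it regenerated. A self-contained caller-side sketch over in-memory buffers; every name other than the zstd API is illustrative.

#define ZSTD_STATIC_LINKING_ONLY     /* the buffer-less API is declared in the advanced section of zstd.h */
#include <zstd.h>
#include <stddef.h>

static size_t sketch_bufferless_decode(ZSTD_DCtx* dctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    const char* const iend = ip + srcSize;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    size_t const initErr = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(initErr)) return initErr;

    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);   /* the decoder's expected input size */
        size_t produced;
        if (toRead == 0) return (size_t)(op - (char*)dst);          /* frame fully decoded */
        if (toRead > (size_t)(iend - ip)) return toRead;            /* caller must supply more input */
        produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(produced)) return produced;
        ip += toRead;
        op += produced;
    }
}
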
2226
927
 
@@ -2234,11 +935,12 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
2234
935
  return 0;
2235
936
  }
2236
937
 
2237
- /*! ZSTD_loadEntropy() :
938
+ /*! ZSTD_loadDEntropy() :
2238
939
  * dict : must point at beginning of a valid zstd dictionary.
2239
940
  * @return : size of entropy tables read */
2240
- static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
2241
- const void* const dict, size_t const dictSize)
941
+ size_t
942
+ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
943
+ const void* const dict, size_t const dictSize)
2242
944
  {
2243
945
  const BYTE* dictPtr = (const BYTE*)dict;
2244
946
  const BYTE* const dictEnd = dictPtr + dictSize;
@@ -2252,15 +954,22 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
2252
954
  ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
2253
955
  { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
2254
956
  size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
957
+ #ifdef HUF_FORCE_DECOMPRESS_X1
958
+ /* in minimal huffman, we always use X1 variants */
959
+ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
960
+ dictPtr, dictEnd - dictPtr,
961
+ workspace, workspaceSize);
962
+ #else
2255
963
  size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
2256
964
  dictPtr, dictEnd - dictPtr,
2257
965
  workspace, workspaceSize);
966
+ #endif
2258
967
  if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
2259
968
  dictPtr += hSize;
2260
969
  }
2261
970
 
2262
971
  { short offcodeNCount[MaxOff+1];
2263
- U32 offcodeMaxValue = MaxOff, offcodeLog;
972
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
2264
973
  size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
2265
974
  if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
2266
975
  if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
@@ -2320,7 +1029,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
2320
1029
  dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
2321
1030
 
2322
1031
  /* load entropy tables */
2323
- { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
1032
+ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
2324
1033
  if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
2325
1034
  dict = (const char*)dict + eSize;
2326
1035
  dictSize -= eSize;
@@ -2364,209 +1073,25 @@ size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t
2364
1073
 
2365
1074
  /* ====== ZSTD_DDict ====== */
2366
1075
 
2367
- struct ZSTD_DDict_s {
2368
- void* dictBuffer;
2369
- const void* dictContent;
2370
- size_t dictSize;
2371
- ZSTD_entropyDTables_t entropy;
2372
- U32 dictID;
2373
- U32 entropyPresent;
2374
- ZSTD_customMem cMem;
2375
- }; /* typedef'd to ZSTD_DDict within "zstd.h" */
2376
-
2377
- static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
2378
- {
2379
- assert(ddict != NULL);
2380
- return ddict->dictContent;
2381
- }
2382
-
2383
- static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
2384
- {
2385
- assert(ddict != NULL);
2386
- return ddict->dictSize;
2387
- }
2388
-
2389
1076
  size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
2390
1077
  {
2391
1078
  DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
2392
1079
  assert(dctx != NULL);
2393
1080
  if (ddict) {
2394
- dctx->ddictIsCold = (dctx->dictEnd != (const char*)ddict->dictContent + ddict->dictSize);
1081
+ const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
1082
+ size_t const dictSize = ZSTD_DDict_dictSize(ddict);
1083
+ const void* const dictEnd = dictStart + dictSize;
1084
+ dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
2395
1085
  DEBUGLOG(4, "DDict is %s",
2396
1086
  dctx->ddictIsCold ? "~cold~" : "hot!");
2397
1087
  }
2398
1088
  CHECK_F( ZSTD_decompressBegin(dctx) );
2399
1089
  if (ddict) { /* NULL ddict is equivalent to no dictionary */
2400
- dctx->dictID = ddict->dictID;
2401
- dctx->prefixStart = ddict->dictContent;
2402
- dctx->virtualStart = ddict->dictContent;
2403
- dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
2404
- dctx->previousDstEnd = dctx->dictEnd;
2405
- if (ddict->entropyPresent) {
2406
- dctx->litEntropy = 1;
2407
- dctx->fseEntropy = 1;
2408
- dctx->LLTptr = ddict->entropy.LLTable;
2409
- dctx->MLTptr = ddict->entropy.MLTable;
2410
- dctx->OFTptr = ddict->entropy.OFTable;
2411
- dctx->HUFptr = ddict->entropy.hufTable;
2412
- dctx->entropy.rep[0] = ddict->entropy.rep[0];
2413
- dctx->entropy.rep[1] = ddict->entropy.rep[1];
2414
- dctx->entropy.rep[2] = ddict->entropy.rep[2];
2415
- } else {
2416
- dctx->litEntropy = 0;
2417
- dctx->fseEntropy = 0;
2418
- }
2419
- }
2420
- return 0;
2421
- }
2422
-
2423
- static size_t
2424
- ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict,
2425
- ZSTD_dictContentType_e dictContentType)
2426
- {
2427
- ddict->dictID = 0;
2428
- ddict->entropyPresent = 0;
2429
- if (dictContentType == ZSTD_dct_rawContent) return 0;
2430
-
2431
- if (ddict->dictSize < 8) {
2432
- if (dictContentType == ZSTD_dct_fullDict)
2433
- return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
2434
- return 0; /* pure content mode */
2435
- }
2436
- { U32 const magic = MEM_readLE32(ddict->dictContent);
2437
- if (magic != ZSTD_MAGIC_DICTIONARY) {
2438
- if (dictContentType == ZSTD_dct_fullDict)
2439
- return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
2440
- return 0; /* pure content mode */
2441
- }
1090
+ ZSTD_copyDDictParameters(dctx, ddict);
2442
1091
  }
2443
- ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
2444
-
2445
- /* load entropy tables */
2446
- CHECK_E( ZSTD_loadEntropy(&ddict->entropy,
2447
- ddict->dictContent, ddict->dictSize),
2448
- dictionary_corrupted );
2449
- ddict->entropyPresent = 1;
2450
- return 0;
2451
- }
2452
-
2453
-
2454
- static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
2455
- const void* dict, size_t dictSize,
2456
- ZSTD_dictLoadMethod_e dictLoadMethod,
2457
- ZSTD_dictContentType_e dictContentType)
2458
- {
2459
- if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
2460
- ddict->dictBuffer = NULL;
2461
- ddict->dictContent = dict;
2462
- if (!dict) dictSize = 0;
2463
- } else {
2464
- void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
2465
- ddict->dictBuffer = internalBuffer;
2466
- ddict->dictContent = internalBuffer;
2467
- if (!internalBuffer) return ERROR(memory_allocation);
2468
- memcpy(internalBuffer, dict, dictSize);
2469
- }
2470
- ddict->dictSize = dictSize;
2471
- ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
2472
-
2473
- /* parse dictionary content */
2474
- CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );
2475
-
2476
1092
  return 0;
2477
1093
  }
2478
1094
 
2479
- ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
2480
- ZSTD_dictLoadMethod_e dictLoadMethod,
2481
- ZSTD_dictContentType_e dictContentType,
2482
- ZSTD_customMem customMem)
2483
- {
2484
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
2485
-
2486
- { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
2487
- if (ddict == NULL) return NULL;
2488
- ddict->cMem = customMem;
2489
- { size_t const initResult = ZSTD_initDDict_internal(ddict,
2490
- dict, dictSize,
2491
- dictLoadMethod, dictContentType);
2492
- if (ZSTD_isError(initResult)) {
2493
- ZSTD_freeDDict(ddict);
2494
- return NULL;
2495
- } }
2496
- return ddict;
2497
- }
2498
- }
2499
-
2500
- /*! ZSTD_createDDict() :
2501
- * Create a digested dictionary, to start decompression without startup delay.
2502
- * `dict` content is copied inside DDict.
2503
- * Consequently, `dict` can be released after `ZSTD_DDict` creation */
2504
- ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
2505
- {
2506
- ZSTD_customMem const allocator = { NULL, NULL, NULL };
2507
- return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
2508
- }
2509
-
2510
- /*! ZSTD_createDDict_byReference() :
2511
- * Create a digested dictionary, to start decompression without startup delay.
2512
- * Dictionary content is simply referenced, it will be accessed during decompression.
2513
- * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
2514
- ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
2515
- {
2516
- ZSTD_customMem const allocator = { NULL, NULL, NULL };
2517
- return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
2518
- }
2519
-
2520
-
2521
- const ZSTD_DDict* ZSTD_initStaticDDict(
2522
- void* sBuffer, size_t sBufferSize,
2523
- const void* dict, size_t dictSize,
2524
- ZSTD_dictLoadMethod_e dictLoadMethod,
2525
- ZSTD_dictContentType_e dictContentType)
2526
- {
2527
- size_t const neededSpace = sizeof(ZSTD_DDict)
2528
- + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2529
- ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
2530
- assert(sBuffer != NULL);
2531
- assert(dict != NULL);
2532
- if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
2533
- if (sBufferSize < neededSpace) return NULL;
2534
- if (dictLoadMethod == ZSTD_dlm_byCopy) {
2535
- memcpy(ddict+1, dict, dictSize); /* local copy */
2536
- dict = ddict+1;
2537
- }
2538
- if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
2539
- dict, dictSize,
2540
- ZSTD_dlm_byRef, dictContentType) ))
2541
- return NULL;
2542
- return ddict;
2543
- }
2544
-
2545
-
2546
- size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
2547
- {
2548
- if (ddict==NULL) return 0; /* support free on NULL */
2549
- { ZSTD_customMem const cMem = ddict->cMem;
2550
- ZSTD_free(ddict->dictBuffer, cMem);
2551
- ZSTD_free(ddict, cMem);
2552
- return 0;
2553
- }
2554
- }
2555
-
2556
- /*! ZSTD_estimateDDictSize() :
2557
- * Estimate amount of memory that will be needed to create a dictionary for decompression.
2558
- * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
2559
- size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
2560
- {
2561
- return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2562
- }
2563
-
2564
- size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
2565
- {
2566
- if (ddict==NULL) return 0; /* support sizeof on NULL */
2567
- return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
2568
- }
2569
-
2570
1095
  /*! ZSTD_getDictID_fromDict() :
2571
1096
  * Provides the dictID stored within dictionary.
2572
1097
  * if @return == 0, the dictionary is not conformant with Zstandard specification.
@@ -2578,16 +1103,6 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
2578
1103
  return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
2579
1104
  }
2580
1105
 
2581
- /*! ZSTD_getDictID_fromDDict() :
2582
- * Provides the dictID of the dictionary loaded into `ddict`.
2583
- * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
2584
- * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
2585
- unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
2586
- {
2587
- if (ddict==NULL) return 0;
2588
- return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
2589
- }
2590
-
2591
1106
  /*! ZSTD_getDictID_fromFrame() :
2592
1107
  * Provides the dictID required to decompresse frame stored within `src`.
2593
1108
  * If @return == 0, the dictID could not be decoded.
@@ -2695,7 +1210,7 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
2695
1210
 
2696
1211
 
2697
1212
  /* ZSTD_initDStream_usingDict() :
2698
- * return : expected size, aka ZSTD_frameHeaderSize_prefix.
1213
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
2699
1214
  * this function cannot fail */
2700
1215
  size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
2701
1216
  {
@@ -2703,7 +1218,7 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
2703
1218
  zds->streamStage = zdss_init;
2704
1219
  zds->noForwardProgress = 0;
2705
1220
  CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
2706
- return ZSTD_frameHeaderSize_prefix;
1221
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
2707
1222
  }
2708
1223
 
2709
1224
  /* note : this variant can't fail */
@@ -2724,7 +1239,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
2724
1239
  }
2725
1240
 
2726
1241
  /* ZSTD_resetDStream() :
2727
- * return : expected size, aka ZSTD_frameHeaderSize_prefix.
1242
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
2728
1243
  * this function cannot fail */
2729
1244
  size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
2730
1245
  {
@@ -2733,23 +1248,9 @@ size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
2733
1248
  dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
2734
1249
  dctx->legacyVersion = 0;
2735
1250
  dctx->hostageByte = 0;
2736
- return ZSTD_frameHeaderSize_prefix;
1251
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
2737
1252
  }
2738
1253
 
2739
- size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
2740
- ZSTD_DStreamParameter_e paramType, unsigned paramValue)
2741
- {
2742
- if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2743
- switch(paramType)
2744
- {
2745
- default : return ERROR(parameter_unsupported);
2746
- case DStream_p_maxWindowSize :
2747
- DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
2748
- dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
2749
- break;
2750
- }
2751
- return 0;
2752
- }
2753
1254
 
2754
1255
  size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
2755
1256
  {
@@ -2758,18 +1259,92 @@ size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
2758
1259
  return 0;
2759
1260
  }
2760
1261
 
1262
+ /* ZSTD_DCtx_setMaxWindowSize() :
1263
+ * note : no direct equivalence in ZSTD_DCtx_setParameter,
1264
+ * since this version sets windowSize, and the other sets windowLog */
2761
1265
  size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
2762
1266
  {
1267
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
1268
+ size_t const min = (size_t)1 << bounds.lowerBound;
1269
+ size_t const max = (size_t)1 << bounds.upperBound;
2763
1270
  if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
1271
+ if (maxWindowSize < min) return ERROR(parameter_outOfBound);
1272
+ if (maxWindowSize > max) return ERROR(parameter_outOfBound);
2764
1273
  dctx->maxWindowSize = maxWindowSize;
2765
1274
  return 0;
2766
1275
  }
2767
1276
 
2768
1277
  size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
2769
1278
  {
2770
- DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
1279
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);
1280
+ }
1281
+
1282
+ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
1283
+ {
1284
+ ZSTD_bounds bounds = { 0, 0, 0 };
1285
+ switch(dParam) {
1286
+ case ZSTD_d_windowLogMax:
1287
+ bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
1288
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
1289
+ return bounds;
1290
+ case ZSTD_d_format:
1291
+ bounds.lowerBound = (int)ZSTD_f_zstd1;
1292
+ bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
1293
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
1294
+ return bounds;
1295
+ default:;
1296
+ }
1297
+ bounds.error = ERROR(parameter_unsupported);
1298
+ return bounds;
1299
+ }
1300
+
1301
+ /* ZSTD_dParam_withinBounds:
1302
+ * @return 1 if value is within dParam bounds,
1303
+ * 0 otherwise */
1304
+ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
1305
+ {
1306
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
1307
+ if (ZSTD_isError(bounds.error)) return 0;
1308
+ if (value < bounds.lowerBound) return 0;
1309
+ if (value > bounds.upperBound) return 0;
1310
+ return 1;
1311
+ }
1312
+
1313
+ #define CHECK_DBOUNDS(p,v) { \
1314
+ if (!ZSTD_dParam_withinBounds(p, v)) \
1315
+ return ERROR(parameter_outOfBound); \
1316
+ }
1317
+
1318
+ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
1319
+ {
2771
1320
  if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2772
- dctx->format = format;
1321
+ switch(dParam) {
1322
+ case ZSTD_d_windowLogMax:
1323
+ CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
1324
+ dctx->maxWindowSize = ((size_t)1) << value;
1325
+ return 0;
1326
+ case ZSTD_d_format:
1327
+ CHECK_DBOUNDS(ZSTD_d_format, value);
1328
+ dctx->format = (ZSTD_format_e)value;
1329
+ return 0;
1330
+ default:;
1331
+ }
1332
+ return ERROR(parameter_unsupported);
1333
+ }
1334
+
1335
+ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
1336
+ {
1337
+ if ( (reset == ZSTD_reset_session_only)
1338
+ || (reset == ZSTD_reset_session_and_parameters) ) {
1339
+ (void)ZSTD_initDStream(dctx);
1340
+ }
1341
+ if ( (reset == ZSTD_reset_parameters)
1342
+ || (reset == ZSTD_reset_session_and_parameters) ) {
1343
+ if (dctx->streamStage != zdss_init)
1344
+ return ERROR(stage_wrong);
1345
+ dctx->format = ZSTD_f_zstd1;
1346
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
1347
+ }
2773
1348
  return 0;
2774
1349
  }
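
The bounds helpers, ZSTD_DCtx_setParameter() and ZSTD_DCtx_reset() above combine into the advanced decompression-parameter flow. A minimal caller-side sketch (error handling kept short; depending on the release these declarations may still require ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY   /* may be needed while these parameters sit in the staging section */
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();

    /* refuse frames whose window would exceed 1 << 27 bytes (128 MiB) */
    {   ZSTD_bounds const b = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
        if (!ZSTD_isError(b.error) && 27 >= b.lowerBound && 27 <= b.upperBound) {
            size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
            if (ZSTD_isError(err)) printf("%s\n", ZSTD_getErrorName(err));
        }
    }

    /* ... run one or more decompression sessions with this dctx ... */

    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);              /* drop the session, keep parameters */
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);    /* or reset everything to defaults */

    ZSTD_freeDCtx(dctx);
    return 0;
}
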
2775
1350
 
@@ -2799,7 +1374,7 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize)
2799
1374
 
2800
1375
  size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
2801
1376
  {
2802
- U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */
1377
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
2803
1378
  ZSTD_frameHeader zfh;
2804
1379
  size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
2805
1380
  if (ZSTD_isError(err)) return err;
@@ -2868,8 +1443,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
2868
1443
  #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
2869
1444
  U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
2870
1445
  if (legacyVersion) {
2871
- const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
2872
- size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
1446
+ const void* const dict = zds->ddict ? ZSTD_DDict_dictContent(zds->ddict) : NULL;
1447
+ size_t const dictSize = zds->ddict ? ZSTD_DDict_dictSize(zds->ddict) : 0;
2873
1448
  DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
2874
1449
  /* legacy support is incompatible with static dctx */
2875
1450
  if (zds->staticSize) return ERROR(memory_allocation);
@@ -2894,7 +1469,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
2894
1469
  zds->lhSize += remainingInput;
2895
1470
  }
2896
1471
  input->pos = input->size;
2897
- return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
1472
+ return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
2898
1473
  }
2899
1474
  assert(ip != NULL);
2900
1475
  memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
@@ -2922,7 +1497,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
2922
1497
  DEBUGLOG(4, "Consume header");
2923
1498
  CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
2924
1499
 
2925
- if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
1500
+ if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
2926
1501
  zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
2927
1502
  zds->stage = ZSTDds_skipFrame;
2928
1503
  } else {
@@ -3038,7 +1613,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
3038
1613
  someMoreWork = 0;
3039
1614
  break;
3040
1615
 
3041
- default: return ERROR(GENERIC); /* impossible */
1616
+ default:
1617
+ assert(0); /* impossible */
1618
+ return ERROR(GENERIC); /* some compilers require default to do something */
3042
1619
  } }
3043
1620
 
3044
1621
  /* result */
@@ -3080,13 +1657,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
3080
1657
  }
3081
1658
  }
3082
1659
 
3083
-
3084
- size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
3085
- {
3086
- return ZSTD_decompressStream(dctx, output, input);
3087
- }
3088
-
3089
- size_t ZSTD_decompress_generic_simpleArgs (
1660
+ size_t ZSTD_decompressStream_simpleArgs (
3090
1661
  ZSTD_DCtx* dctx,
3091
1662
  void* dst, size_t dstCapacity, size_t* dstPos,
3092
1663
  const void* src, size_t srcSize, size_t* srcPos)
@@ -3094,15 +1665,8 @@ size_t ZSTD_decompress_generic_simpleArgs (
3094
1665
  ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
3095
1666
  ZSTD_inBuffer input = { src, srcSize, *srcPos };
3096
1667
  /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
3097
- size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
1668
+ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
3098
1669
  *dstPos = output.pos;
3099
1670
  *srcPos = input.pos;
3100
1671
  return cErr;
3101
1672
  }
3102
-
3103
- void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
3104
- {
3105
- (void)ZSTD_initDStream(dctx);
3106
- dctx->format = ZSTD_f_zstd1;
3107
- dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
3108
- }
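
ZSTD_decompressStream_simpleArgs() is the flat-argument twin of ZSTD_decompressStream(), convenient for bindings that prefer raw pointers and position indices over ZSTD_inBuffer/ZSTD_outBuffer. A hedged caller-side sketch; all names other than the zstd functions are illustrative.

#define ZSTD_STATIC_LINKING_ONLY   /* the simpleArgs variant is declared in the advanced section of zstd.h */
#include <zstd.h>
#include <stddef.h>

/* Decompresses as much of src as fits into dst, tracking positions explicitly.
 * Returns 0 when a frame ended exactly on the consumed input, an error code,
 * or a hint of how many more input bytes the decoder would like next. */
static size_t sketch_decompress_chunk(ZSTD_DCtx* dctx,
                                      void* dst, size_t dstCapacity, size_t* dstUsed,
                                      const void* src, size_t srcSize, size_t* srcUsed)
{
    size_t dstPos = 0, srcPos = 0;
    size_t ret;
    do {
        ret = ZSTD_decompressStream_simpleArgs(dctx, dst, dstCapacity, &dstPos,
                                               src, srcSize, &srcPos);
        if (ZSTD_isError(ret)) break;
    } while (ret != 0 && srcPos < srcSize && dstPos < dstCapacity);
    *dstUsed = dstPos;
    *srcUsed = srcPos;
    return ret;
}
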