zstd-ruby 1.3.5.0 → 1.3.7.0

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +4 -2
  3. data/README.md +2 -1
  4. data/ext/zstdruby/libzstd/BUCK +1 -0
  5. data/ext/zstdruby/libzstd/Makefile +25 -13
  6. data/ext/zstdruby/libzstd/README.md +11 -10
  7. data/ext/zstdruby/libzstd/common/bitstream.h +8 -11
  8. data/ext/zstdruby/libzstd/common/compiler.h +30 -8
  9. data/ext/zstdruby/libzstd/common/cpu.h +1 -1
  10. data/ext/zstdruby/libzstd/common/mem.h +20 -2
  11. data/ext/zstdruby/libzstd/common/xxhash.c +1 -0
  12. data/ext/zstdruby/libzstd/common/zstd_internal.h +3 -2
  13. data/ext/zstdruby/libzstd/compress/fse_compress.c +55 -48
  14. data/ext/zstdruby/libzstd/compress/hist.h +1 -1
  15. data/ext/zstdruby/libzstd/compress/huf_compress.c +1 -1
  16. data/ext/zstdruby/libzstd/compress/zstd_compress.c +290 -147
  17. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +5 -2
  18. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +63 -51
  19. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -4
  20. data/ext/zstdruby/libzstd/compress/zstd_fast.c +44 -33
  21. data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -4
  22. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +125 -116
  23. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +13 -15
  24. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +9 -11
  25. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +0 -1
  26. data/ext/zstdruby/libzstd/compress/zstd_opt.c +42 -36
  27. data/ext/zstdruby/libzstd/compress/zstd_opt.h +8 -9
  28. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +96 -51
  29. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +16 -6
  30. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +3 -3
  31. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +169 -101
  32. data/ext/zstdruby/libzstd/dictBuilder/cover.c +111 -87
  33. data/ext/zstdruby/libzstd/dictBuilder/cover.h +83 -0
  34. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +3 -3
  35. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +728 -0
  36. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +34 -31
  37. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +60 -5
  38. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +9 -3
  39. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +6 -0
  40. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +6 -0
  41. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +1 -5
  42. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +12 -9
  43. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +10 -10
  44. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +20 -18
  45. data/ext/zstdruby/libzstd/zstd.h +109 -50
  46. data/lib/zstd-ruby/version.rb +1 -1
  47. metadata +4 -2
data/ext/zstdruby/libzstd/compress/zstdmt_compress.h

@@ -119,11 +119,21 @@ ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
  * ===   Not exposed in libzstd. Never invoke directly   ===
  * ======================================================== */

+/*! ZSTDMT_toFlushNow()
+ *  Tell how many bytes are ready to be flushed immediately.
+ *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.
+ *  If return 0, it means there is no active job,
+ *  or, it means oldest job is still active, but everything produced has been flushed so far,
+ *  therefore flushing is limited by speed of oldest job. */
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
+
+/*! ZSTDMT_CCtxParam_setMTCtxParameter()
+ *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
 size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);

-/* ZSTDMT_CCtxParam_setNbWorkers()
- * Set nbWorkers, and clamp it.
- * Also reset jobSize and overlapLog */
+/*! ZSTDMT_CCtxParam_setNbWorkers()
+ *  Set nbWorkers, and clamp it.
+ *  Also reset jobSize and overlapLog */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);

 /*! ZSTDMT_updateCParams_whileCompressing() :
@@ -131,9 +141,9 @@ size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorker
  * New parameters will be applied to next compression job. */
 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);

-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for current frame.
- * able to count progression inside worker threads.
+/*! ZSTDMT_getFrameProgression():
+ *  tells how much data has been consumed (input) and produced (output) for current frame.
+ *  able to count progression inside worker threads.
  */
 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);

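Note: ZSTDMT_toFlushNow() is internal (the surrounding block states it is not exposed in libzstd), so the sketch below only illustrates the return-value contract documented in the new comment. It is hypothetical caller logic, not library code; mtctx handling and the drain_output() callback are placeholders.

#include "zstdmt_compress.h"   /* internal header; illustration only */

/* placeholder: consume up to `nbBytes` of already-produced compressed data */
static void drain_output(ZSTDMT_CCtx* mtctx, size_t nbBytes);

/* Hypothetical polling loop around the documented ZSTDMT_toFlushNow() contract:
 * a return of 0 means either no active job, or that everything the oldest job
 * has produced so far is already flushed, so further progress waits on that job. */
static void flush_whatever_is_ready(ZSTDMT_CCtx* mtctx)
{
    size_t ready;
    while ((ready = ZSTDMT_toFlushNow(mtctx)) != 0) {
        drain_output(mtctx, ready);
    }
}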
data/ext/zstdruby/libzstd/decompress/huf_decompress.c

@@ -533,9 +533,9 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
     }
 }

-size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src,
-                             size_t srcSize, void* workSpace,
-                             size_t wkspSize)
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+                       const void* src, size_t srcSize,
+                       void* workSpace, size_t wkspSize)
 {
     U32 tableLog, maxW, sizeOfSort, nbSymbols;
     DTableDesc dtd = HUF_getDTableDesc(DTable);
data/ext/zstdruby/libzstd/decompress/zstd_decompress.c

@@ -40,7 +40,6 @@
 #  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
 #endif

-
 /*!
  * NO_FORWARD_PROGRESS_MAX :
  * maximum allowed nb of calls to ZSTD_decompressStream() and ZSTD_decompress_generic()
@@ -52,11 +51,13 @@
 #  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
 #endif

+
 /*-*******************************************************
 *  Dependencies
 *********************************************************/
 #include <string.h>      /* memcpy, memmove, memset */
-#include "cpu.h"
+#include "compiler.h"    /* prefetch */
+#include "cpu.h"         /* bmi2 */
 #include "mem.h"         /* low level memory routines */
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
@@ -68,6 +69,9 @@
 #  include "zstd_legacy.h"
 #endif

+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
+

 /*-*************************************
 *  Errors
@@ -110,11 +114,10 @@ typedef struct {
 #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))

 typedef struct {
-    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];
-    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
-    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];
+    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
+    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
+    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
     HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
-    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
     U32 rep[ZSTD_REP_NUM];
 } ZSTD_entropyDTables_t;

@@ -125,6 +128,7 @@ struct ZSTD_DCtx_s
     const ZSTD_seqSymbol* OFTptr;
     const HUF_DTable* HUFptr;
     ZSTD_entropyDTables_t entropy;
+    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
     const void* previousDstEnd;   /* detect continuity */
     const void* prefixStart;      /* start of current segment */
     const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
@@ -138,7 +142,6 @@ struct ZSTD_DCtx_s
     U32 fseEntropy;
     XXH64_state_t xxhState;
     size_t headerSize;
-    U32 dictID;
     ZSTD_format_e format;
     const BYTE* litPtr;
     ZSTD_customMem customMem;
@@ -147,9 +150,13 @@ struct ZSTD_DCtx_s
     size_t staticSize;
     int bmi2;   /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */

-    /* streaming */
+    /* dictionary */
     ZSTD_DDict* ddictLocal;
-    const ZSTD_DDict* ddict;
+    const ZSTD_DDict* ddict;   /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+    U32 dictID;
+    int ddictIsCold;           /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+
+    /* streaming */
     ZSTD_dStreamStage streamStage;
     char* inBuff;
     size_t inBuffSize;
@@ -185,7 +192,7 @@ size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
 static size_t ZSTD_startingInputLength(ZSTD_format_e format)
 {
     size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
-                    ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
+                    ZSTD_frameHeaderSize_prefix - ZSTD_FRAMEIDSIZE :
                     ZSTD_frameHeaderSize_prefix;
     ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
     /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
@@ -200,6 +207,8 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
     dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
     dctx->ddict       = NULL;
     dctx->ddictLocal  = NULL;
+    dctx->dictEnd     = NULL;
+    dctx->ddictIsCold = 0;
     dctx->inBuff      = NULL;
     dctx->inBuffSize  = 0;
     dctx->outBuffSize = 0;
@@ -278,7 +287,7 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
  *  Note 3 : Skippable Frame Identifiers are considered valid. */
 unsigned ZSTD_isFrame(const void* buffer, size_t size)
 {
-    if (size < ZSTD_frameIdSize) return 0;
+    if (size < ZSTD_FRAMEIDSIZE) return 0;
     {   U32 const magic = MEM_readLE32(buffer);
         if (magic == ZSTD_MAGICNUMBER) return 1;
         if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
@@ -330,7 +339,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
     const BYTE* ip = (const BYTE*)src;
     size_t const minInputSize = ZSTD_startingInputLength(format);

+    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
     if (srcSize < minInputSize) return minInputSize;
+    if (src==NULL) return ERROR(GENERIC);   /* invalid parameter */

     if ( (format != ZSTD_f_zstd1_magicless)
       && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
@@ -339,7 +350,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
             if (srcSize < ZSTD_skippableHeaderSize)
                 return ZSTD_skippableHeaderSize; /* magic number + frame length */
             memset(zfhPtr, 0, sizeof(*zfhPtr));
-            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
+            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
             zfhPtr->frameType = ZSTD_skippableFrame;
             return 0;
         }
@@ -451,7 +462,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
             size_t skippableSize;
             if (srcSize < ZSTD_skippableHeaderSize)
                 return ERROR(srcSize_wrong);
-            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
+            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_FRAMEIDSIZE)
                           + ZSTD_skippableHeaderSize;
             if (srcSize < skippableSize) {
                 return ZSTD_CONTENTSIZE_ERROR;
@@ -540,6 +551,7 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
 static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
 {
+    if (dst==NULL) return ERROR(dstSize_tooSmall);
     if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
     memcpy(dst, src, srcSize);
     return srcSize;
@@ -556,6 +568,9 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
     return regenSize;
 }

+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize);
 /*! ZSTD_decodeLiteralsBlock() :
  * @return : nb of bytes read from src (< srcSize )
  *  note : symbol not declared but exposed for fullbench */
@@ -572,6 +587,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
     case set_repeat:
         if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
         /* fall-through */
+
     case set_compressed:
         if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
         {   size_t lhSize, litSize, litCSize;
@@ -603,15 +619,20 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
                 if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
                 if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);

+                /* prefetch huffman table if cold */
+                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+                }
+
                 if (HUF_isError((litEncType==set_repeat) ?
                                     ( singleStream ?
                                         HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
                                         HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
                                     ( singleStream ?
                                         HUF_decompress1X1_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                         dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) :
+                                                                         dctx->workspace, sizeof(dctx->workspace), dctx->bmi2) :
                                         HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                           dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2))))
+                                                                           dctx->workspace, sizeof(dctx->workspace), dctx->bmi2))))
                     return ERROR(corruption_detected);

                 dctx->litPtr = dctx->litBuffer;
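Note: PREFETCH_AREA() comes from common/compiler.h, which this version now includes at the top of zstd_decompress.c ("/* prefetch */"). The sketch below only illustrates the idea behind such a macro, assuming a GCC/Clang-style __builtin_prefetch and a 64-byte cache line; it is not the library's actual definition.

#include <stddef.h>

/* Illustrative stand-in for a PREFETCH_AREA-style helper: touch a memory
 * range one cache line at a time, so a "cold" table is already in cache
 * before the hot decoding loop reads it. CACHE_LINE_SIZE is an assumption. */
#define CACHE_LINE_SIZE 64

static void prefetch_area(const void* ptr, size_t size)
{
    const char* const base = (const char*)ptr;
    size_t pos;
    for (pos = 0; pos < size; pos += CACHE_LINE_SIZE) {
        __builtin_prefetch(base + pos, 0 /* read */, 3 /* high temporal locality */);
    }
}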
@@ -883,7 +904,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
                                  symbolEncodingType_e type, U32 max, U32 maxLog,
                                  const void* src, size_t srcSize,
                                  const U32* baseValue, const U32* nbAdditionalBits,
-                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable)
+                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+                                 int ddictIsCold, int nbSeq)
 {
     switch(type)
     {
@@ -902,6 +924,12 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
         return 0;
     case set_repeat:
         if (!flagRepeatTable) return ERROR(corruption_detected);
+        /* prefetch FSE table if used */
+        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+            const void* const pStart = *DTablePtr;
+            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+            PREFETCH_AREA(pStart, pSize);
+        }
         return 0;
     case set_compressed :
         {   U32 tableLog;
@@ -947,6 +975,9 @@ static const U32 ML_base[MaxML+1] = {
                      67,  83,  99, 0x83, 0x103, 0x203, 0x403, 0x803,
                      0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };

+/* Hidden delcaration for fullbench */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                             const void* src, size_t srcSize);

 size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                              const void* src, size_t srcSize)
@@ -954,25 +985,25 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
     const BYTE* const istart = (const BYTE* const)src;
     const BYTE* const iend = istart + srcSize;
     const BYTE* ip = istart;
+    int nbSeq;
     DEBUGLOG(5, "ZSTD_decodeSeqHeaders");

     /* check */
     if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);

     /* SeqHead */
-    {   int nbSeq = *ip++;
-        if (!nbSeq) { *nbSeqPtr=0; return 1; }
-        if (nbSeq > 0x7F) {
-            if (nbSeq == 0xFF) {
-                if (ip+2 > iend) return ERROR(srcSize_wrong);
-                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-            } else {
-                if (ip >= iend) return ERROR(srcSize_wrong);
-                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-            }
+    nbSeq = *ip++;
+    if (!nbSeq) { *nbSeqPtr=0; return 1; }
+    if (nbSeq > 0x7F) {
+        if (nbSeq == 0xFF) {
+            if (ip+2 > iend) return ERROR(srcSize_wrong);
+            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+        } else {
+            if (ip >= iend) return ERROR(srcSize_wrong);
+            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
         }
-        *nbSeqPtr = nbSeq;
     }
+    *nbSeqPtr = nbSeq;

     /* FSE table descriptors */
     if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
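Note: the sequence-count header decoded above occupies 1, 2, or 3 bytes. The standalone sketch below mirrors that logic outside of the DCtx as a worked illustration; LONGNBSEQ is assumed to be 0x7F00 (its value in zstd_internal.h), and read_le16() is a local stand-in for MEM_readLE16().

#include <stddef.h>
#include <stdint.h>

#define LONGNBSEQ 0x7F00   /* assumed value, matching zstd_internal.h */

/* little-endian 16-bit read, stand-in for MEM_readLE16() */
static unsigned read_le16(const uint8_t* p) { return (unsigned)p[0] + ((unsigned)p[1] << 8); }

/* Decode the per-block sequence count.
 * Returns the number of header bytes consumed, or 0 if `size` is too small.
 *   first byte <  0x80 : the value itself              (1 byte)
 *   first byte <  0xFF : ((b0-0x80)<<8) + b1           (2 bytes)  e.g. {0x85,0x12} -> 0x0512 = 1298
 *   first byte == 0xFF : LE16(b1,b2) + LONGNBSEQ       (3 bytes)
 */
static size_t decode_nb_seq(const uint8_t* src, size_t size, int* nbSeqPtr)
{
    if (size < 1) return 0;
    {   int nbSeq = src[0];
        if (nbSeq <= 0x7F) { *nbSeqPtr = nbSeq; return 1; }
        if (nbSeq == 0xFF) {
            if (size < 3) return 0;
            *nbSeqPtr = (int)read_le16(src+1) + LONGNBSEQ;
            return 3;
        }
        if (size < 2) return 0;
        *nbSeqPtr = ((nbSeq - 0x80) << 8) + src[1];
        return 2;
    }
}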
@@ -986,7 +1017,8 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                                                       LLtype, MaxLL, LLFSELog,
                                                       ip, iend-ip,
                                                       LL_base, LL_bits,
-                                                      LL_defaultDTable, dctx->fseEntropy);
+                                                      LL_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
             ip += llhSize;
         }
@@ -995,7 +1027,8 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                                                       OFtype, MaxOff, OffFSELog,
                                                       ip, iend-ip,
                                                       OF_base, OF_bits,
-                                                      OF_defaultDTable, dctx->fseEntropy);
+                                                      OF_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
             ip += ofhSize;
         }
@@ -1004,12 +1037,23 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                                                       MLtype, MaxML, MLFSELog,
                                                       ip, iend-ip,
                                                       ML_base, ML_bits,
-                                                      ML_defaultDTable, dctx->fseEntropy);
+                                                      ML_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
             ip += mlhSize;
         }
     }

+    /* prefetch dictionary content */
+    if (dctx->ddictIsCold) {
+        size_t const dictSize = (const char*)dctx->prefixStart - (const char*)dctx->virtualStart;
+        size_t const psmin = MIN(dictSize, (size_t)(64*nbSeq) /* heuristic */ );
+        size_t const pSize = MIN(psmin, 128 KB /* protection */ );
+        const void* const pStart = (const char*)dctx->dictEnd - pSize;
+        PREFETCH_AREA(pStart, pSize);
+        dctx->ddictIsCold = 0;
+    }
+
     return ip-istart;
 }

@@ -1676,7 +1720,8 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
     /* isLongOffset must be true if there are long offsets.
      * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
      * We don't expect that to be the case in 64-bit mode.
-     * In block mode, window size is not known, so we have to be conservative. (note: but it could be evaluated from current-lowLimit)
+     * In block mode, window size is not known, so we have to be conservative.
+     * (note: but it could be evaluated from current-lowLimit)
      */
     ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
     DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
@@ -1743,10 +1788,10 @@ ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, siz
 }


-static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE value, size_t length)
 {
     if (length > dstCapacity) return ERROR(dstSize_tooSmall);
-    memset(dst, byte, length);
+    memset(dst, value, length);
     return length;
 }

@@ -1763,7 +1808,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
 #endif
     if ( (srcSize >= ZSTD_skippableHeaderSize)
       && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
-        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
+        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
     } else {
         const BYTE* ip = (const BYTE*)src;
         const BYTE* const ipstart = ip;
@@ -1797,7 +1842,6 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
         if (zfh.checksumFlag) {   /* Final frame content checksum */
             if (remainingSize < 4) return ERROR(srcSize_wrong);
             ip += 4;
-            remainingSize -= 4;
         }

         return ip - ipstart;
@@ -1885,9 +1929,6 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
     return op-ostart;
 }

-static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
-
 static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
                                         void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
@@ -1896,6 +1937,8 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
 {
     void* const dststart = dst;
     int moreThan1Frame = 0;
+
+    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
     assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */

     if (ddict) {
@@ -1932,7 +1975,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
                 size_t skippableSize;
                 if (srcSize < ZSTD_skippableHeaderSize)
                     return ERROR(srcSize_wrong);
-                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
+                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE)
                               + ZSTD_skippableHeaderSize;
                 if (srcSize < skippableSize) return ERROR(srcSize_wrong);

@@ -2057,7 +2100,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
     case ZSTDds_getFrameHeaderSize :
         assert(src != NULL);
         if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
-            assert(srcSize >= ZSTD_frameIdSize);  /* to read skippable magic number */
+            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
             if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
                 memcpy(dctx->headerBuffer, src, srcSize);
                 dctx->expected = ZSTD_skippableHeaderSize - srcSize;  /* remaining to load to get full skippable frame header */
@@ -2167,7 +2210,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
         assert(src != NULL);
         assert(srcSize <= ZSTD_skippableHeaderSize);
         memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize);  /* complete skippable header */
-        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize);  /* note : dctx->expected can grow seriously large, beyond local buffer size */
+        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);  /* note : dctx->expected can grow seriously large, beyond local buffer size */
         dctx->stage = ZSTDds_skipFrame;
         return 0;

@@ -2191,21 +2234,27 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
     return 0;
 }

-/* ZSTD_loadEntropy() :
- * dict : must point at beginning of a valid zstd dictionary
+/*! ZSTD_loadEntropy() :
+ *  dict : must point at beginning of a valid zstd dictionary.
  * @return : size of entropy tables read */
-static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
+static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
+                               const void* const dict, size_t const dictSize)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;

     if (dictSize <= 8) return ERROR(dictionary_corrupted);
+    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
     dictPtr += 8;   /* skip header = magic + dictID */

-
-    {   size_t const hSize = HUF_readDTableX2_wksp(
-            entropy->hufTable, dictPtr, dictEnd - dictPtr,
-            entropy->workspace, sizeof(entropy->workspace));
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
+        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+                                                   dictPtr, dictEnd - dictPtr,
+                                                   workspace, workspaceSize);
         if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
         dictPtr += hSize;
     }
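Note: the static asserts above are what makes it safe to treat three adjacent struct members as one contiguous scratch buffer while the Huffman table is being built. Below is a minimal standalone illustration of the same technique; the struct, member names, and sizes are made up, only the offsetof-based layout check mirrors the diff.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Toy struct: reuse two adjacent tables as one temporary workspace,
 * in the same spirit as ZSTD_loadEntropy() reusing LLTable/OFTable/MLTable. */
typedef struct {
    unsigned tableA[64];
    unsigned tableB[64];
    unsigned other;
} toy_tables_t;

/* Layout check: tableB must start right after tableA, otherwise the
 * combined region would overlap padding or unrelated members. */
static_assert(offsetof(toy_tables_t, tableB)
              == offsetof(toy_tables_t, tableA) + sizeof(((toy_tables_t*)0)->tableA),
              "tables must be contiguous");

static void use_as_workspace(toy_tables_t* t)
{
    void* const workspace = t->tableA;                                   /* start of the combined region */
    size_t const workspaceSize = sizeof(t->tableA) + sizeof(t->tableB);  /* both tables together */
    memset(workspace, 0, workspaceSize);   /* scratch use only; tables are rebuilt afterwards */
}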
@@ -2216,7 +2265,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const
         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
         if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->OFTable,
+        ZSTD_buildFSETable( entropy->OFTable,
                             offcodeNCount, offcodeMaxValue,
                             OF_base, OF_bits,
                             offcodeLog);
@@ -2229,7 +2278,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const
         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->MLTable,
+        ZSTD_buildFSETable( entropy->MLTable,
                             matchlengthNCount, matchlengthMaxValue,
                             ML_base, ML_bits,
                             matchlengthLog);
@@ -2242,7 +2291,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const
         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->LLTable,
+        ZSTD_buildFSETable( entropy->LLTable,
                             litlengthNCount, litlengthMaxValue,
                             LL_base, LL_bits,
                             litlengthLog);
@@ -2268,7 +2317,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
         if (magic != ZSTD_MAGIC_DICTIONARY) {
             return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
     }   }
-    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);

     /* load entropy tables */
     {   size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
@@ -2282,7 +2331,6 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
     return ZSTD_refDictContent(dctx, dict, dictSize);
 }

-/* Note : this function cannot fail */
 size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
 {
     assert(dctx != NULL);
@@ -2328,42 +2376,53 @@ struct ZSTD_DDict_s {

 static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
 {
+    assert(ddict != NULL);
     return ddict->dictContent;
 }

 static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
 {
+    assert(ddict != NULL);
     return ddict->dictSize;
 }

-size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
 {
-    CHECK_F( ZSTD_decompressBegin(dstDCtx) );
-    if (ddict) {   /* support begin on NULL */
-        dstDCtx->dictID = ddict->dictID;
-        dstDCtx->prefixStart = ddict->dictContent;
-        dstDCtx->virtualStart = ddict->dictContent;
-        dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
-        dstDCtx->previousDstEnd = dstDCtx->dictEnd;
+    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+    assert(dctx != NULL);
+    if (ddict) {
+        dctx->ddictIsCold = (dctx->dictEnd != (const char*)ddict->dictContent + ddict->dictSize);
+        DEBUGLOG(4, "DDict is %s",
+                    dctx->ddictIsCold ? "~cold~" : "hot!");
+    }
+    CHECK_F( ZSTD_decompressBegin(dctx) );
+    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
+        dctx->dictID = ddict->dictID;
+        dctx->prefixStart = ddict->dictContent;
+        dctx->virtualStart = ddict->dictContent;
+        dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+        dctx->previousDstEnd = dctx->dictEnd;
         if (ddict->entropyPresent) {
-            dstDCtx->litEntropy = 1;
-            dstDCtx->fseEntropy = 1;
-            dstDCtx->LLTptr = ddict->entropy.LLTable;
-            dstDCtx->MLTptr = ddict->entropy.MLTable;
-            dstDCtx->OFTptr = ddict->entropy.OFTable;
-            dstDCtx->HUFptr = ddict->entropy.hufTable;
-            dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
-            dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
-            dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
+            dctx->litEntropy = 1;
+            dctx->fseEntropy = 1;
+            dctx->LLTptr = ddict->entropy.LLTable;
+            dctx->MLTptr = ddict->entropy.MLTable;
+            dctx->OFTptr = ddict->entropy.OFTable;
+            dctx->HUFptr = ddict->entropy.hufTable;
+            dctx->entropy.rep[0] = ddict->entropy.rep[0];
+            dctx->entropy.rep[1] = ddict->entropy.rep[1];
+            dctx->entropy.rep[2] = ddict->entropy.rep[2];
         } else {
-            dstDCtx->litEntropy = 0;
-            dstDCtx->fseEntropy = 0;
+            dctx->litEntropy = 0;
+            dctx->fseEntropy = 0;
         }
     }
     return 0;
 }

-static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType)
+static size_t
+ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict,
+                         ZSTD_dictContentType_e dictContentType)
 {
     ddict->dictID = 0;
     ddict->entropyPresent = 0;
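Note: ddictIsCold is set above whenever the referenced dictionary differs from the one this context used last, which is what arms the prefetch heuristics in the block decoder. From the application side this simply means that reusing one DCtx/DDict pair across many frames keeps the dictionary "hot". A minimal usage sketch with the public one-shot API; buffer names and sizes are placeholders.

#include <zstd.h>

/* Decompress many dictionary-compressed frames with a single DCtx + DDict.
 * After the first frame, the DDict stays "hot" for this context, so the
 * cold-dictionary prefetch path is only taken once.
 * On error, returns the zstd error code (check with ZSTD_isError). */
size_t decompress_many(void* dst, size_t dstCapacity,
                       const void* const* frames, const size_t* frameSizes, size_t nbFrames,
                       const void* dictBuffer, size_t dictSize)
{
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictSize);
    size_t total = 0;
    size_t n;
    if (dctx == NULL || ddict == NULL) goto cleanup;
    for (n = 0; n < nbFrames; n++) {
        size_t const r = ZSTD_decompress_usingDDict(dctx,
                                (char*)dst + total, dstCapacity - total,
                                frames[n], frameSizes[n], ddict);
        if (ZSTD_isError(r)) { total = r; goto cleanup; }
        total += r;
    }
cleanup:
    ZSTD_freeDDict(ddict);   /* both free functions accept NULL */
    ZSTD_freeDCtx(dctx);
    return total;
}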
@@ -2381,10 +2440,12 @@ static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e
             return 0;   /* pure content mode */
         }
     }
-    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
+    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);

     /* load entropy tables */
-    CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
+    CHECK_E( ZSTD_loadEntropy(&ddict->entropy,
+                              ddict->dictContent, ddict->dictSize),
+             dictionary_corrupted );
     ddict->entropyPresent = 1;
     return 0;
 }
@@ -2398,6 +2459,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
         ddict->dictBuffer = NULL;
         ddict->dictContent = dict;
+        if (!dict) dictSize = 0;
     } else {
         void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
         ddict->dictBuffer = internalBuffer;
@@ -2422,14 +2484,15 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

     {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        if (!ddict) return NULL;
+        if (ddict == NULL) return NULL;
         ddict->cMem = customMem;
-
-        if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) {
-            ZSTD_freeDDict(ddict);
-            return NULL;
-        }
-
+        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
+                                            dict, dictSize,
+                                            dictLoadMethod, dictContentType);
+            if (ZSTD_isError(initResult)) {
+                ZSTD_freeDDict(ddict);
+                return NULL;
+        }   }
         return ddict;
     }
 }
@@ -2456,23 +2519,25 @@ ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize


 const ZSTD_DDict* ZSTD_initStaticDDict(
-                                void* workspace, size_t workspaceSize,
+                                void* sBuffer, size_t sBufferSize,
                                 const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType)
 {
-    size_t const neededSpace =
-        sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-    ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
-    assert(workspace != NULL);
+    size_t const neededSpace = sizeof(ZSTD_DDict)
+                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+    assert(sBuffer != NULL);
     assert(dict != NULL);
-    if ((size_t)workspace & 7) return NULL;   /* 8-aligned */
-    if (workspaceSize < neededSpace) return NULL;
+    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
+    if (sBufferSize < neededSpace) return NULL;
     if (dictLoadMethod == ZSTD_dlm_byCopy) {
         memcpy(ddict+1, dict, dictSize);   /* local copy */
         dict = ddict+1;
     }
-    if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) ))
+    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+                                              dict, dictSize,
+                                              ZSTD_dlm_byRef, dictContentType) ))
         return NULL;
     return ddict;
 }
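Note: ZSTD_initStaticDDict() builds a DDict inside caller-provided, 8-byte-aligned storage instead of allocating; it is part of the experimental API (requires ZSTD_STATIC_LINKING_ONLY). A minimal usage sketch follows; the buffer capacity below is a placeholder, and ZSTD_estimateDDictSize() is assumed to be the intended way to size it.

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_initStaticDDict / ZSTD_estimateDDictSize are experimental API */
#include <zstd.h>
#include <stdint.h>

/* Build a DDict in static storage, no allocation involved.
 * Returns NULL if the static buffer is too small or misaligned. */
static const ZSTD_DDict* make_static_ddict(const void* dict, size_t dictSize)
{
    /* placeholder capacity: must be >= ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy)
     * and 8-byte aligned, as ZSTD_initStaticDDict() requires */
    static uint64_t ddictSpace[(1 << 16) / sizeof(uint64_t)];
    if (ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy) > sizeof(ddictSpace)) return NULL;
    return ZSTD_initStaticDDict(ddictSpace, sizeof(ddictSpace),
                                dict, dictSize,
                                ZSTD_dlm_byCopy, ZSTD_dct_auto);
}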
@@ -2510,7 +2575,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
 {
     if (dictSize < 8) return 0;
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
-    return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
 }

 /*! ZSTD_getDictID_fromDDict() :
@@ -2586,12 +2651,15 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds)
 }


-/* *** Initialization *** */
+/* ***  Initialization  *** */

 size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
 size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }

-size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+                                         const void* dict, size_t dictSize,
+                                         ZSTD_dictLoadMethod_e dictLoadMethod,
+                                         ZSTD_dictContentType_e dictContentType)
 {
     if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
     ZSTD_freeDDict(dctx->ddictLocal);
@@ -2645,13 +2713,6 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
     return ZSTD_initDStream_usingDict(zds, NULL, 0);
 }

-size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    dctx->ddict = ddict;
-    return 0;
-}
-
 /* ZSTD_initDStream_usingDDict() :
  * ddict will just be referenced, and must outlive decompression session
  * this function cannot fail */
@@ -2690,6 +2751,13 @@ size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
     return 0;
 }

+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    dctx->ddict = ddict;
+    return 0;
+}
+
 size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
 {
     if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
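Note: ZSTD_DCtx_refDDict() (relocated above) and ZSTD_initDStream_usingDDict() both only reference a prebuilt DDict, so the DDict must outlive the decompression session. Below is a minimal streaming sketch using the ZSTD_initDStream_usingDDict() path; buffer handling is simplified and the write_out() sink is a placeholder.

#include <zstd.h>

/* placeholder sink for decompressed data; returns nonzero on failure */
static int write_out(const void* data, size_t size);

/* Stream-decompress one frame with a referenced DDict.
 * Returns 0 on success, 1 on error. */
static int stream_decompress_with_ddict(const void* src, size_t srcSize, const ZSTD_DDict* ddict)
{
    ZSTD_DStream* const zds = ZSTD_createDStream();
    char outBuf[1 << 16];                      /* any reasonable chunk; ZSTD_DStreamOutSize() is the recommended size */
    ZSTD_inBuffer input = { src, srcSize, 0 };
    int err = 0;
    if (zds == NULL) return 1;
    if (ZSTD_isError(ZSTD_initDStream_usingDDict(zds, ddict))) { err = 1; goto done; }
    while (input.pos < input.size) {
        ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
        size_t const ret = ZSTD_decompressStream(zds, &output, &input);
        if (ZSTD_isError(ret)) { err = 1; break; }
        if (write_out(outBuf, output.pos)) { err = 1; break; }
        if (ret == 0) break;                   /* frame fully decoded and flushed */
    }
done:
    ZSTD_freeDStream(zds);
    return err;
}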
@@ -2855,7 +2923,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
             CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));

             if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
-                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
+                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
                 zds->stage = ZSTDds_skipFrame;
             } else {
                 CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));