zstd-ruby 1.3.0.0 → 1.3.1.0

This diff compares the contents of two publicly released versions of the package, as published to their registry. It is provided for informational purposes only.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/zstdruby/libzstd/common/bitstream.h +40 -41
  4. data/ext/zstdruby/libzstd/common/compiler.h +85 -0
  5. data/ext/zstdruby/libzstd/common/error_private.c +8 -10
  6. data/ext/zstdruby/libzstd/common/error_private.h +4 -4
  7. data/ext/zstdruby/libzstd/common/fse.h +11 -5
  8. data/ext/zstdruby/libzstd/common/fse_decompress.c +3 -22
  9. data/ext/zstdruby/libzstd/common/huf.h +5 -6
  10. data/ext/zstdruby/libzstd/common/mem.h +6 -6
  11. data/ext/zstdruby/libzstd/common/pool.c +61 -27
  12. data/ext/zstdruby/libzstd/common/pool.h +10 -10
  13. data/ext/zstdruby/libzstd/common/threading.h +5 -6
  14. data/ext/zstdruby/libzstd/common/xxhash.c +28 -22
  15. data/ext/zstdruby/libzstd/common/zstd_common.c +4 -4
  16. data/ext/zstdruby/libzstd/common/zstd_errors.h +30 -32
  17. data/ext/zstdruby/libzstd/common/zstd_internal.h +57 -56
  18. data/ext/zstdruby/libzstd/compress/fse_compress.c +4 -22
  19. data/ext/zstdruby/libzstd/compress/huf_compress.c +4 -3
  20. data/ext/zstdruby/libzstd/compress/zstd_compress.c +314 -304
  21. data/ext/zstdruby/libzstd/compress/zstd_opt.h +118 -116
  22. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +223 -156
  23. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +10 -9
  24. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +8 -24
  25. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +95 -115
  26. data/ext/zstdruby/libzstd/deprecated/zbuff.h +4 -4
  27. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +4 -5
  28. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +4 -4
  29. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +4 -4
  30. data/ext/zstdruby/libzstd/dictBuilder/cover.c +7 -9
  31. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +5 -5
  32. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +4 -4
  33. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +8 -4
  34. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +4 -4
  35. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +4 -4
  36. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +4 -4
  37. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +4 -4
  38. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +4 -4
  39. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +4 -4
  40. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +5 -5
  41. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +4 -4
  42. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +5 -5
  43. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +4 -4
  44. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +5 -5
  45. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +4 -4
  46. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +4 -4
  47. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +4 -4
  48. data/ext/zstdruby/libzstd/zstd.h +16 -14
  49. data/lib/zstd-ruby/version.rb +1 -1
  50. metadata +3 -2
@@ -1,55 +1,28 @@
- /**
+ /*
   * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
   * All rights reserved.
   *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
   */

  #ifndef ZSTD_CCOMMON_H_MODULE
  #define ZSTD_CCOMMON_H_MODULE

- /*-*******************************************************
- * Compiler specifics
- *********************************************************/
- #ifdef _MSC_VER /* Visual Studio */
- # define FORCE_INLINE static __forceinline
- # include <intrin.h> /* For Visual 2005 */
- # pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
- # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
- # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
- # pragma warning(disable : 4324) /* disable: C4324: padded structure */
- #else
- # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
- # ifdef __GNUC__
- # define FORCE_INLINE static inline __attribute__((always_inline))
- # else
- # define FORCE_INLINE static inline
- # endif
- # else
- # define FORCE_INLINE static
- # endif /* __STDC_VERSION__ */
- #endif
-
- #ifdef _MSC_VER
- # define FORCE_NOINLINE static __declspec(noinline)
- #else
- # ifdef __GNUC__
- # define FORCE_NOINLINE static __attribute__((__noinline__))
- # else
- # define FORCE_NOINLINE static
- # endif
- #endif
-

  /*-*************************************
  * Dependencies
  ***************************************/
+ #include "compiler.h"
  #include "mem.h"
  #include "error_private.h"
  #define ZSTD_STATIC_LINKING_ONLY
  #include "zstd.h"
+ #define FSE_STATIC_LINKING_ONLY
+ #include "fse.h"
+ #define HUF_STATIC_LINKING_ONLY
+ #include "huf.h"
  #ifndef XXH_STATIC_LINKING_ONLY
  # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
  #endif
@@ -211,20 +184,6 @@ MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* s
  *********************************************/
  typedef struct ZSTD_stats_s ZSTD_stats_t;

- typedef struct {
- U32 off;
- U32 len;
- } ZSTD_match_t;
-
- typedef struct {
- U32 price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
- } ZSTD_optimal_t;
-
-
  typedef struct seqDef_s {
  U32 offset;
  U16 litLength;
@@ -242,13 +201,31 @@ typedef struct {
  BYTE* ofCode;
  U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
  U32 longLengthPos;
- /* opt */
- ZSTD_optimal_t* priceTable;
- ZSTD_match_t* matchTable;
- U32* matchLengthFreq;
- U32* litLengthFreq;
+ U32 rep[ZSTD_REP_NUM];
+ U32 repToConfirm[ZSTD_REP_NUM];
+ } seqStore_t;
+
+ typedef struct {
+ U32 off;
+ U32 len;
+ } ZSTD_match_t;
+
+ typedef struct {
+ U32 price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+ } ZSTD_optimal_t;
+
+ typedef struct {
  U32* litFreq;
+ U32* litLengthFreq;
+ U32* matchLengthFreq;
  U32* offCodeFreq;
+ ZSTD_match_t* matchTable;
+ ZSTD_optimal_t* priceTable;
+
  U32 matchLengthSum;
  U32 matchSum;
  U32 litLengthSum;
@@ -264,7 +241,19 @@ typedef struct {
  U32 cachedPrice;
  U32 cachedLitLength;
  const BYTE* cachedLiterals;
- } seqStore_t;
+ } optState_t;
+
+ typedef struct {
+ U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ U32 workspace[HUF_WORKSPACE_SIZE_U32];
+ HUF_repeat hufCTable_repeatMode;
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
+ } ZSTD_entropyCTables_t;

  const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
  void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
@@ -331,4 +320,16 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
  ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict);


+ typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+ } blockProperties_t;
+
+ /*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+
  #endif /* ZSTD_CCOMMON_H_MODULE */
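The hunks above are the structural core of this release: the five separately allocated entropy tables collapse into the single fixed-size ZSTD_entropyCTables_t, the optimal-parser state moves out of seqStore_t into the new optState_t, and the repcode history (rep / repToConfirm) moves into seqStore_t. A minimal sketch of what the single struct buys the zstd_compress.c hunks below (all identifiers taken from this diff):

    /* Sizing, placing, and copying the entropy state each collapse
     * to one expression once every table lives inline in one struct. */
    size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);                  /* sizing  */
    zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace;                        /* placing */
    memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t));  /* copying */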
@@ -32,27 +32,6 @@
  - Public forum : https://groups.google.com/forum/#!forum/lz4c
  ****************************************************************** */

- /* **************************************************************
- * Compiler specifics
- ****************************************************************/
- #ifdef _MSC_VER /* Visual Studio */
- # define FORCE_INLINE static __forceinline
- # include <intrin.h> /* For Visual 2005 */
- # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
- # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
- #else
- # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
- # ifdef __GNUC__
- # define FORCE_INLINE static inline __attribute__((always_inline))
- # else
- # define FORCE_INLINE static inline
- # endif
- # else
- # define FORCE_INLINE static
- # endif /* __STDC_VERSION__ */
- #endif
-
-
  /* **************************************************************
  * Includes
  ****************************************************************/
@@ -60,13 +39,16 @@
  #include <string.h> /* memcpy, memset */
  #include <stdio.h> /* printf (debug) */
  #include "bitstream.h"
+ #include "compiler.h"
  #define FSE_STATIC_LINKING_ONLY
  #include "fse.h"
+ #include "error_private.h"


  /* **************************************************************
  * Error Management
  ****************************************************************/
+ #define FSE_isError ERR_isError
  #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */


@@ -781,7 +763,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,

  size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

- #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
+ #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
  #define CHECK_F(f) { CHECK_V_F(_var_err__, f); }

  /* FSE_compress_wksp() :
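The CHECK_V_F change above is a genuine bug fix rather than a cleanup: on error the old macro returned `f`, which re-expands and therefore re-evaluates the checked expression a second time; the new macro returns the already-computed result `e`. With a function-call argument, the old form ran the call twice and propagated the second call's result, not the value it had just tested:

    /* Old error path of CHECK_V_F(err, FSE_writeNCount(op, oend-op, norm, max, tableLog)):
     *     size_t const err = FSE_writeNCount(op, oend-op, norm, max, tableLog);
     *     if (ERR_isError(err)) return FSE_writeNCount(op, oend-op, norm, max, tableLog);
     * The fixed macros evaluate the argument exactly once: */
    #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
    #define CHECK_F(f)      { CHECK_V_F(_var_err__, f); }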
@@ -50,13 +50,15 @@
  #include "fse.h" /* header compression */
  #define HUF_STATIC_LINKING_ONLY
  #include "huf.h"
+ #include "error_private.h"


  /* **************************************************************
  * Error Management
  ****************************************************************/
+ #define HUF_isError ERR_isError
  #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
- #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
+ #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
  #define CHECK_F(f) { CHECK_V_F(_var_err__, f); }


@@ -436,7 +438,7 @@ static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt*

  size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

- #define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+ #define HUF_FLUSHBITS(s) BIT_flushBits(s)

  #define HUF_FLUSHBITS_1(stream) \
  if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
@@ -451,7 +453,6 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
  BYTE* const oend = ostart + dstSize;
  BYTE* op = ostart;
  size_t n;
- const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
  BIT_CStream_t bitC;

  /* init */
@@ -1,10 +1,10 @@
- /**
+ /*
   * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
   * All rights reserved.
   *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
   */


@@ -36,13 +36,6 @@ static const U32 g_searchStrength = 8; /* control skip over incompressible dat
  #define HASH_READ_SIZE 8
  typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;

- /* entropy tables always have same size */
- static size_t const hufCTable_size = HUF_CTABLE_SIZE(255);
- static size_t const litlengthCTable_size = FSE_CTABLE_SIZE(LLFSELog, MaxLL);
- static size_t const offcodeCTable_size = FSE_CTABLE_SIZE(OffFSELog, MaxOff);
- static size_t const matchlengthCTable_size = FSE_CTABLE_SIZE(MLFSELog, MaxML);
- static size_t const entropyScratchSpace_size = HUF_WORKSPACE_SIZE;
-

  /*-*************************************
  * Helper functions
@@ -89,8 +82,6 @@ struct ZSTD_CCtx_s {
  U32 loadedDictEnd; /* index of end of dictionary */
  U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
  ZSTD_compressionStage_e stage;
- U32 rep[ZSTD_REP_NUM];
- U32 repToConfirm[ZSTD_REP_NUM];
  U32 dictID;
  int compressionLevel;
  ZSTD_parameters requestedParams;
@@ -105,16 +96,11 @@ struct ZSTD_CCtx_s {
  size_t staticSize;

  seqStore_t seqStore; /* sequences storage ptrs */
+ optState_t optState;
  U32* hashTable;
  U32* hashTable3;
  U32* chainTable;
- HUF_repeat hufCTable_repeatMode;
- HUF_CElt* hufCTable;
- U32 fseCTables_ready;
- FSE_CTable* offcodeCTable;
- FSE_CTable* matchlengthCTable;
- FSE_CTable* litlengthCTable;
- unsigned* entropyScratchSpace;
+ ZSTD_entropyCTables_t* entropy;

  /* streaming */
  char* inBuff;
@@ -174,19 +160,9 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
  cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);

  /* entropy space (never moves) */
- /* note : this code should be shared with resetCCtx, rather than copy/pasted */
- { void* ptr = cctx->workSpace;
- cctx->hufCTable = (HUF_CElt*)ptr;
- ptr = (char*)cctx->hufCTable + hufCTable_size;
- cctx->offcodeCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + offcodeCTable_size;
- cctx->matchlengthCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + matchlengthCTable_size;
- cctx->litlengthCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + litlengthCTable_size;
- assert(((size_t)ptr & 3) == 0); /* ensure correct alignment */
- cctx->entropyScratchSpace = (unsigned*) ptr;
- }
+ if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL;
+ assert(((size_t)cctx->workSpace & 7) == 0); /* ensure correct alignment */
+ cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace;

  return cctx;
  }
@@ -237,7 +213,7 @@ size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned
  ZSTD_STATIC_ASSERT(ZSTD_dm_auto==0);
  ZSTD_STATIC_ASSERT(ZSTD_dm_rawContent==1);
  case ZSTD_p_forceRawDict : cctx->dictMode = (ZSTD_dictMode_e)(value>0); return 0;
- default: return ERROR(parameter_unknown);
+ default: return ERROR(parameter_unsupported);
  }
  }

@@ -251,9 +227,9 @@ static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx)
  cctx->compressionLevel = ZSTD_CLEVEL_CUSTOM;
  }

- #define CLAMPCHECK(val,min,max) { \
- if (((val)<(min)) | ((val)>(max))) { \
- return ERROR(compressionParameter_outOfBound); \
+ #define CLAMPCHECK(val,min,max) { \
+ if (((val)<(min)) | ((val)>(max))) { \
+ return ERROR(parameter_outOfBound); \
  } }

  size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
@@ -349,7 +325,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
  /* restrict dictionary mode, to "rawContent" or "fullDict" only */
  ZSTD_STATIC_ASSERT((U32)ZSTD_dm_fullDict > (U32)ZSTD_dm_rawContent);
  if (value > (unsigned)ZSTD_dm_fullDict)
- return ERROR(compressionParameter_outOfBound);
+ return ERROR(parameter_outOfBound);
  cctx->dictMode = (ZSTD_dictMode_e)value;
  return 0;

@@ -370,31 +346,31 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
  if (value==0) return 0;
  DEBUGLOG(5, " setting nbThreads : %u", value);
  #ifndef ZSTD_MULTITHREAD
- if (value > 1) return ERROR(compressionParameter_unsupported);
+ if (value > 1) return ERROR(parameter_unsupported);
  #endif
  if ((value>1) && (cctx->nbThreads != value)) {
  if (cctx->staticSize) /* MT not compatible with static alloc */
- return ERROR(compressionParameter_unsupported);
+ return ERROR(parameter_unsupported);
  ZSTDMT_freeCCtx(cctx->mtctx);
  cctx->nbThreads = 1;
- cctx->mtctx = ZSTDMT_createCCtx(value);
+ cctx->mtctx = ZSTDMT_createCCtx_advanced(value, cctx->customMem);
  if (cctx->mtctx == NULL) return ERROR(memory_allocation);
  }
  cctx->nbThreads = value;
  return 0;

  case ZSTD_p_jobSize:
- if (cctx->nbThreads <= 1) return ERROR(compressionParameter_unsupported);
+ if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
  assert(cctx->mtctx != NULL);
  return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_sectionSize, value);

  case ZSTD_p_overlapSizeLog:
  DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->nbThreads);
- if (cctx->nbThreads <= 1) return ERROR(compressionParameter_unsupported);
+ if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
  assert(cctx->mtctx != NULL);
  return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_overlapSectionLog, value);

- default: return ERROR(parameter_unknown);
+ default: return ERROR(parameter_unsupported);
  }
  }

@@ -474,7 +450,8 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
  CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
  CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
  CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
- if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) return ERROR(compressionParameter_unsupported);
+ if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
+ return ERROR(parameter_unsupported);
  return 0;
  }

@@ -551,9 +528,7 @@ size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams)
  size_t const hSize = ((size_t)1) << cParams.hashLog;
  U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
  size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const entropySpace = hufCTable_size + litlengthCTable_size
- + offcodeCTable_size + matchlengthCTable_size
- + entropyScratchSpace_size;
+ size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
  size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

  size_t const optBudget = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
@@ -620,8 +595,8 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 ple
  cctx->stage = ZSTDcs_init;
  cctx->dictID = 0;
  cctx->loadedDictEnd = 0;
- { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; }
- cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = repStartValue[i]; }
+ cctx->optState.litLengthSum = 0; /* force reset of btopt stats */
  XXH64_reset(&cctx->xxhState, 0);
  return 0;
  }
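The rep-code handling in ZSTD_continueCCtx above reflects a broader migration in this release: the repcode history leaves ZSTD_CCtx and lives in seqStore_t, so a block compressor reads seqStore.rep[], stages its result in seqStore.repToConfirm[], and the history is committed only once the block is known to be emitted in compressed form. Condensed flow, assembled from later hunks in this diff:

    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];  /* read history */
    /* ... match search over the block ... */
    seqStorePtr->repToConfirm[0] = offset_1;    /* stage for the next block */
    seqStorePtr->repToConfirm[1] = offset_2;
    /* committed by ZSTD_compressSequences() only for a compressed block: */
    { int i; for (i=0; i<ZSTD_REP_NUM; i++)
          seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; }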
@@ -641,8 +616,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  if (crp == ZSTDcrp_continue) {
  if (ZSTD_equivalentParams(params.cParams, zc->appliedParams.cParams)) {
  DEBUGLOG(5, "ZSTD_equivalentParams()==1");
- zc->fseCTables_ready = 0;
- zc->hufCTable_repeatMode = HUF_repeat_none;
+ zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
+ zc->entropy->offcode_repeatMode = FSE_repeat_none;
+ zc->entropy->matchlength_repeatMode = FSE_repeat_none;
+ zc->entropy->litlength_repeatMode = FSE_repeat_none;
  return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
  } }

@@ -662,9 +639,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  void* ptr;

  /* Check if workSpace is large enough, alloc a new one if needed */
- { size_t const entropySpace = hufCTable_size + litlengthCTable_size
- + offcodeCTable_size + matchlengthCTable_size
- + entropyScratchSpace_size;
+ { size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
  size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
  + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
  size_t const optSpace = ( (params.cParams.strategy == ZSTD_btopt)
@@ -689,16 +664,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  ptr = zc->workSpace;

  /* entropy space */
- zc->hufCTable = (HUF_CElt*)ptr;
- ptr = (char*)zc->hufCTable + hufCTable_size; /* note : HUF_CElt* is incomplete type, size is estimated via macro */
- zc->offcodeCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + offcodeCTable_size;
- zc->matchlengthCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + matchlengthCTable_size;
- zc->litlengthCTable = (FSE_CTable*) ptr;
- ptr = (char*)ptr + litlengthCTable_size;
- assert(((size_t)ptr & 3) == 0); /* ensure correct alignment */
- zc->entropyScratchSpace = (unsigned*) ptr;
+ assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */
+ assert(zc->workSpaceSize >= sizeof(ZSTD_entropyCTables_t));
+ zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace;
  } }

  /* init params */
@@ -715,39 +683,35 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  zc->stage = ZSTDcs_init;
  zc->dictID = 0;
  zc->loadedDictEnd = 0;
- zc->fseCTables_ready = 0;
- zc->hufCTable_repeatMode = HUF_repeat_none;
+ zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
+ zc->entropy->offcode_repeatMode = FSE_repeat_none;
+ zc->entropy->matchlength_repeatMode = FSE_repeat_none;
+ zc->entropy->litlength_repeatMode = FSE_repeat_none;
  zc->nextToUpdate = 1;
  zc->nextSrc = NULL;
  zc->base = NULL;
  zc->dictBase = NULL;
  zc->dictLimit = 0;
  zc->lowLimit = 0;
- { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->seqStore.rep[i] = repStartValue[i]; }
  zc->hashLog3 = hashLog3;
- zc->seqStore.litLengthSum = 0;
+ zc->optState.litLengthSum = 0;

- /* ensure entropy tables are close together at the beginning */
- assert((void*)zc->hufCTable == zc->workSpace);
- assert((char*)zc->offcodeCTable == (char*)zc->hufCTable + hufCTable_size);
- assert((char*)zc->matchlengthCTable == (char*)zc->offcodeCTable + offcodeCTable_size);
- assert((char*)zc->litlengthCTable == (char*)zc->matchlengthCTable + matchlengthCTable_size);
- assert((char*)zc->entropyScratchSpace == (char*)zc->litlengthCTable + litlengthCTable_size);
- ptr = (char*)zc->entropyScratchSpace + entropyScratchSpace_size;
+ ptr = zc->entropy + 1;

  /* opt parser space */
  if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) {
  DEBUGLOG(5, "reserving optimal parser space");
  assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
- zc->seqStore.litFreq = (U32*)ptr;
- zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
- zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
- zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
- ptr = zc->seqStore.offCodeFreq + (MaxOff+1);
- zc->seqStore.matchTable = (ZSTD_match_t*)ptr;
- ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
- zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
- ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
+ zc->optState.litFreq = (U32*)ptr;
+ zc->optState.litLengthFreq = zc->optState.litFreq + (1<<Litbits);
+ zc->optState.matchLengthFreq = zc->optState.litLengthFreq + (MaxLL+1);
+ zc->optState.offCodeFreq = zc->optState.matchLengthFreq + (MaxML+1);
+ ptr = zc->optState.offCodeFreq + (MaxOff+1);
+ zc->optState.matchTable = (ZSTD_match_t*)ptr;
+ ptr = zc->optState.matchTable + ZSTD_OPT_NUM+1;
+ zc->optState.priceTable = (ZSTD_optimal_t*)ptr;
+ ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1;
  }

  /* table Space */
@@ -783,7 +747,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  * do not use with extDict variant ! */
  void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
  int i;
- for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = 0;
  }

@@ -830,16 +794,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
  dstCCtx->dictID = srcCCtx->dictID;

  /* copy entropy tables */
- dstCCtx->fseCTables_ready = srcCCtx->fseCTables_ready;
- if (srcCCtx->fseCTables_ready) {
- memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, litlengthCTable_size);
- memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, matchlengthCTable_size);
- memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, offcodeCTable_size);
- }
- dstCCtx->hufCTable_repeatMode = srcCCtx->hufCTable_repeatMode;
- if (srcCCtx->hufCTable_repeatMode) {
- memcpy(dstCCtx->hufCTable, srcCCtx->hufCTable, hufCTable_size);
- }
+ memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t));

  return 0;
  }
@@ -956,7 +911,8 @@ static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, cons

  static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }

- static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
+ static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy,
+ ZSTD_strategy strategy,
  void* dst, size_t dstCapacity,
  const void* src, size_t srcSize)
  {
@@ -970,28 +926,28 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,

  /* small ? don't even attempt compression (speed opt) */
  # define LITERAL_NOENTROPY 63
- { size_t const minLitSize = zc->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
+ { size_t const minLitSize = entropy->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
  if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
  }

  if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
- { HUF_repeat repeat = zc->hufCTable_repeatMode;
- int const preferRepeat = zc->appliedParams.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ { HUF_repeat repeat = entropy->hufCTable_repeatMode;
+ int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
  if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
  cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
- zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat)
+ entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat)
  : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
- zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat);
+ entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat);
  if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */
- else { zc->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */
+ else { entropy->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */
  }

- if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) {
- zc->hufCTable_repeatMode = HUF_repeat_none;
+ if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
+ entropy->hufCTable_repeatMode = HUF_repeat_none;
  return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
  }
  if (cLitSize==1) {
- zc->hufCTable_repeatMode = HUF_repeat_none;
+ entropy->hufCTable_repeatMode = HUF_repeat_none;
  return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
  }
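Two separate improvements land in ZSTD_compressLiterals above: it is decoupled from ZSTD_CCtx, receiving only the entropy tables and the strategy it actually uses, and the fallback condition gains an ERR_isError(cLitSize) test so that an error code returned by the HUF compressors can no longer be mistaken for a literal size. The hardened fallback, condensed from the hunk:

    /* cLitSize is either 0 (not compressible), a real size, or an error code;
     * every non-useful case now routes to the uncompressed-literals path. */
    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
        entropy->hufCTable_repeatMode = HUF_repeat_none;  /* table contents no longer trusted */
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }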
@@ -1062,17 +1018,154 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
  mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
  }

- MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
- void* dst, size_t dstCapacity,
- size_t srcSize)
+ MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode,
+ size_t const mostFrequent, size_t nbSeq, U32 defaultNormLog)
+ {
+ #define MIN_SEQ_FOR_DYNAMIC_FSE 64
+ #define MAX_SEQ_FOR_STATIC_FSE 1000
+
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *repeatMode = FSE_repeat_check;
+ return set_rle;
+ }
+ if ((*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+ return set_repeat;
+ }
+ if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) {
+ *repeatMode = FSE_repeat_valid;
+ return set_basic;
+ }
+ *repeatMode = FSE_repeat_check;
+ return set_compressed;
+ }
+
+ MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* CTable, U32 FSELog, symbolEncodingType_e type,
+ U32* count, U32 max,
+ BYTE const* codeTable, size_t nbSeq,
+ S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t workspaceSize)
+ {
+ BYTE* op = (BYTE*)dst;
+ BYTE const* const oend = op + dstCapacity;
+
+ switch (type) {
+ case set_rle:
+ *op = codeTable[0];
+ CHECK_F(FSE_buildCTable_rle(CTable, (BYTE)max));
+ return 1;
+ case set_repeat:
+ return 0;
+ case set_basic:
+ CHECK_F(FSE_buildCTable_wksp(CTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));
+ return 0;
+ case set_compressed: {
+ S16 norm[MaxSeq + 1];
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ if (count[codeTable[nbSeq-1]] > 1) {
+ count[codeTable[nbSeq-1]]--;
+ nbSeq_1--;
+ }
+ CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+ { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return NCountSize;
+ CHECK_F(FSE_buildCTable_wksp(CTable, norm, max, tableLog, workspace, workspaceSize));
+ return NCountSize;
+ }
+ }
+ default: return assert(0), ERROR(GENERIC);
+ }
+ }
+
+ MEM_STATIC size_t ZSTD_encodeSequences(void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+ {
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ U32 const ofBits = ofCodeTable[nbSeq-1];
+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+ BIT_flushBits(&blockStream);
+ }
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+ ofBits - extraBits);
+ } else {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+ }
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
+ U32 const mlBits = ML_bits[mlCode];
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ if (longOffsets) {
+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+ BIT_flushBits(&blockStream); /* (7)*/
+ }
+ BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+ ofBits - extraBits); /* 31 */
+ } else {
+ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+ }
+ BIT_flushBits(&blockStream); /* (7)*/
+ } }
+
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
+ return streamSize;
+ }
+ }
+
+ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+ ZSTD_entropyCTables_t* entropy,
+ ZSTD_compressionParameters const* cParams,
+ void* dst, size_t dstCapacity)
  {
- const int longOffsets = zc->appliedParams.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
- const seqStore_t* seqStorePtr = &(zc->seqStore);
+ const int longOffsets = cParams->windowLog > STREAM_ACCUMULATOR_MIN;
  U32 count[MaxSeq+1];
- S16 norm[MaxSeq+1];
- FSE_CTable* CTable_LitLength = zc->litlengthCTable;
- FSE_CTable* CTable_OffsetBits = zc->offcodeCTable;
- FSE_CTable* CTable_MatchLength = zc->matchlengthCTable;
+ FSE_CTable* CTable_LitLength = entropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = entropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = entropy->matchlengthCTable;
  U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
  const seqDef* const sequences = seqStorePtr->sequencesStart;
  const BYTE* const ofCodeTable = seqStorePtr->ofCode;
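The hunk above splits the old monolithic ZSTD_compressSequences into focused helpers: ZSTD_selectEncodingType() decides how each symbol stream is encoded, ZSTD_buildCTable() materializes the chosen table, and ZSTD_encodeSequences() holds the interleaved FSE/bitstream emission that previously lived inline. The selection heuristic is unchanged, just stated once instead of three times:

    /* Per symbol stream (literal lengths, match lengths, offset codes):
     *   set_rle        - one symbol covers the whole block (and nbSeq > 2)
     *   set_repeat     - previous table still valid, block small (< MAX_SEQ_FOR_STATIC_FSE)
     *   set_basic      - few sequences (< MIN_SEQ_FOR_DYNAMIC_FSE) or near-default stats
     *   set_compressed - otherwise: normalize counts and ship a fresh FSE table */
    LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode,
                                     mostFrequent, nbSeq, LL_defaultNormLog);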
@@ -1083,13 +1176,16 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
  BYTE* op = ostart;
  size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
  BYTE* seqHead;
- BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+ ZSTD_STATIC_ASSERT(sizeof(entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));

  /* Compress literals */
  { const BYTE* const literals = seqStorePtr->litStart;
  size_t const litSize = seqStorePtr->lit - literals;
- size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
- if (ZSTD_isError(cSize)) return cSize;
+ size_t const cSize = ZSTD_compressLiterals(
+ entropy, cParams->strategy, op, dstCapacity, literals, litSize);
+ if (ZSTD_isError(cSize))
+ return cSize;
  op += cSize;
  }

@@ -1098,177 +1194,89 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
  if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq;
  else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
  else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
- if (nbSeq==0) goto _check_compressibility;
+ if (nbSeq==0) return op - ostart;

  /* seqHead : flags for FSE encoding type */
  seqHead = op++;

- #define MIN_SEQ_FOR_DYNAMIC_FSE 64
- #define MAX_SEQ_FOR_STATIC_FSE 1000
-
  /* convert length/distances into codes */
  ZSTD_seqToCodes(seqStorePtr);
-
  /* CTable for Literal Lengths */
  { U32 max = MaxLL;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->entropyScratchSpace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = llCodeTable[0];
- FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
- LLtype = set_rle;
- } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- LLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
- FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
- LLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
- if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize)) return NCountSize;
- op += NCountSize; }
- FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
- LLtype = set_compressed;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace);
+ LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog);
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+ count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ entropy->workspace, sizeof(entropy->workspace));
+ if (ZSTD_isError(countSize)) return countSize;
+ op += countSize;
  } }
-
  /* CTable for Offsets */
  { U32 max = MaxOff;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->entropyScratchSpace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = ofCodeTable[0];
- FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
- Offtype = set_rle;
- } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- Offtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
- FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
- Offtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
- if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize)) return NCountSize;
- op += NCountSize; }
- FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
- Offtype = set_compressed;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace);
+ Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog);
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+ count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, MaxOff,
+ entropy->workspace, sizeof(entropy->workspace));
+ if (ZSTD_isError(countSize)) return countSize;
+ op += countSize;
  } }
-
  /* CTable for MatchLengths */
  { U32 max = MaxML;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->entropyScratchSpace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = *mlCodeTable;
- FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
- MLtype = set_rle;
- } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- MLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
- FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
- MLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
- if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize)) return NCountSize;
- op += NCountSize; }
- FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
- MLtype = set_compressed;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace);
+ MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog);
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+ count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+ entropy->workspace, sizeof(entropy->workspace));
+ if (ZSTD_isError(countSize)) return countSize;
+ op += countSize;
  } }

  *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
- zc->fseCTables_ready = 0;
-
- /* Encoding Sequences */
- { BIT_CStream_t blockStream;
- FSE_CState_t stateMatchLength;
- FSE_CState_t stateOffsetBits;
- FSE_CState_t stateLitLength;
-
- CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
-
- /* first symbols */
- FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
- FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
- FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
- BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
- if (MEM_32bits()) BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
- if (MEM_32bits()) BIT_flushBits(&blockStream);
- if (longOffsets) {
- U32 const ofBits = ofCodeTable[nbSeq-1];
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
- BIT_flushBits(&blockStream);
- }
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
- ofBits - extraBits);
- } else {
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
- }
- BIT_flushBits(&blockStream);
-
- { size_t n;
- for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
- BYTE const llCode = llCodeTable[n];
- BYTE const ofCode = ofCodeTable[n];
- BYTE const mlCode = mlCodeTable[n];
- U32 const llBits = LL_bits[llCode];
- U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
- U32 const mlBits = ML_bits[mlCode];
- /* (7)*/ /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
- FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
- if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
- if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
- BIT_flushBits(&blockStream); /* (7)*/
- BIT_addBits(&blockStream, sequences[n].litLength, llBits);
- if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
- if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
- if (longOffsets) {
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[n].offset, extraBits);
- BIT_flushBits(&blockStream); /* (7)*/
- }
- BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
- ofBits - extraBits); /* 31 */
- } else {
- BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
- }
- BIT_flushBits(&blockStream); /* (7)*/
- } }

- FSE_flushCState(&blockStream, &stateMatchLength);
- FSE_flushCState(&blockStream, &stateOffsetBits);
- FSE_flushCState(&blockStream, &stateLitLength);
+ { size_t const streamSize = ZSTD_encodeSequences(op, oend - op,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+ if (ZSTD_isError(streamSize)) return streamSize;
+ op += streamSize;
+ }

- { size_t const streamSize = BIT_closeCStream(&blockStream);
- if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
- op += streamSize;
- } }
+ return op - ostart;
+ }

- /* check compressibility */
- _check_compressibility:
- { size_t const minGain = ZSTD_minGain(srcSize);
- size_t const maxCSize = srcSize - minGain;
- if ((size_t)(op-ostart) >= maxCSize) {
- zc->hufCTable_repeatMode = HUF_repeat_none;
- return 0;
- } }
+ MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
+ ZSTD_entropyCTables_t* entropy,
+ ZSTD_compressionParameters const* cParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize)
+ {
+ size_t const cSize = ZSTD_compressSequences_internal(seqStorePtr, entropy, cParams,
+ dst, dstCapacity);
+ size_t const minGain = ZSTD_minGain(srcSize);
+ size_t const maxCSize = srcSize - minGain;
+ /* If the srcSize <= dstCapacity, then there is enough space to write a
+ * raw uncompressed block. Since we ran out of space, the block must not
+ * be compressible, so fall back to a raw uncompressed block.
+ */
+ int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
+
+ if (ZSTD_isError(cSize) && !uncompressibleError)
+ return cSize;
+ /* Check compressibility */
+ if (cSize >= maxCSize || uncompressibleError) {
+ entropy->hufCTable_repeatMode = HUF_repeat_none;
+ entropy->offcode_repeatMode = FSE_repeat_none;
+ entropy->matchlength_repeatMode = FSE_repeat_none;
+ entropy->litlength_repeatMode = FSE_repeat_none;
+ return 0;
+ }
+ assert(!ZSTD_isError(cSize));

  /* confirm repcodes */
- { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; }
-
- return op - ostart;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; }
+ return cSize;
  }
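The wrapper introduced above also changes how failure is reported: ZSTD_compressSequences_internal() may legitimately run out of destination space on incompressible input, and ZSTD_compressSequences() converts exactly that dstSize_tooSmall into "emit a raw block" (returning 0) whenever srcSize <= dstCapacity guarantees the raw block fits, resetting all four repeat modes because the half-built tables can no longer be trusted. Its contract, as seen from the call site in a later hunk:

    /* return > 0 : size of the compressed block contents
     *        == 0 : caller must emit a raw (uncompressed) block instead
     *        error: any failure other than "out of space, but a raw block fits" */
    size_t const cSize = ZSTD_compressSequences(&zc->seqStore, zc->entropy,
                                                &zc->appliedParams.cParams,
                                                dst, dstCapacity, srcSize);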
@@ -1475,7 +1483,7 @@ static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
  }


- FORCE_INLINE
+ FORCE_INLINE_TEMPLATE
  void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
  const void* src, size_t srcSize,
  const U32 mls)
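From this hunk onward, every FORCE_INLINE on a template-style function becomes FORCE_INLINE_TEMPLATE, supplied by the new shared header common/compiler.h (+85 lines in the file list) that replaces the per-file "Compiler specifics" blocks deleted earlier in this diff. The purpose is the same template-emulation trick as before: force inlining so constant arguments such as `mls` and `searchMethod` are specialized away by the compiler. A plausible shape for the centralized macro, inferred from the deleted per-file variants (compiler.h itself is not part of this diff excerpt):

    #ifdef _MSC_VER                /* Visual Studio */
    #  define FORCE_INLINE_TEMPLATE static __forceinline
    #elif defined(__GNUC__)
    #  define FORCE_INLINE_TEMPLATE static inline __attribute__((always_inline))
    #else
    #  define FORCE_INLINE_TEMPLATE static inline
    #endif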
@@ -1491,7 +1499,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
1491
1499
  const BYTE* const lowest = base + lowestIndex;
1492
1500
  const BYTE* const iend = istart + srcSize;
1493
1501
  const BYTE* const ilimit = iend - HASH_READ_SIZE;
1494
- U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
1502
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
1495
1503
  U32 offsetSaved = 0;
1496
1504
 
1497
1505
  /* init */
@@ -1552,8 +1560,8 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
1552
1560
  } } }
1553
1561
 
1554
1562
  /* save reps for next block */
1555
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
1556
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
1563
+ seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
1564
+ seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
1557
1565
 
1558
1566
  /* Last Literals */
1559
1567
  { size_t const lastLLSize = iend - anchor;
@@ -1601,7 +1609,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
1601
1609
  const BYTE* const dictEnd = dictBase + dictLimit;
1602
1610
  const BYTE* const iend = istart + srcSize;
1603
1611
  const BYTE* const ilimit = iend - 8;
1604
- U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
1612
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
1605
1613
 
1606
1614
  /* Search Loop */
1607
1615
  while (ip < ilimit) { /* < instead of <=, because (ip+1) */
@@ -1667,7 +1675,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
1667
1675
  } } }
1668
1676
 
1669
1677
  /* save reps for next block */
1670
- ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
1678
+ seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
1671
1679
 
1672
1680
  /* Last Literals */
1673
1681
  { size_t const lastLLSize = iend - anchor;
@@ -1718,7 +1726,7 @@ static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U3
1718
1726
  }
1719
1727
 
1720
1728
 
1721
- FORCE_INLINE
1729
+ FORCE_INLINE_TEMPLATE
1722
1730
  void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
1723
1731
  const void* src, size_t srcSize,
1724
1732
  const U32 mls)
@@ -1736,7 +1744,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
1736
1744
  const BYTE* const lowest = base + lowestIndex;
1737
1745
  const BYTE* const iend = istart + srcSize;
1738
1746
  const BYTE* const ilimit = iend - HASH_READ_SIZE;
1739
- U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
1747
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
1740
1748
  U32 offsetSaved = 0;
1741
1749
 
1742
1750
  /* init */
@@ -1823,8 +1831,8 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
1823
1831
  } } }
1824
1832
 
1825
1833
  /* save reps for next block */
1826
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
1827
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
1834
+ seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
1835
+ seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
1828
1836
 
1829
1837
  /* Last Literals */
1830
1838
  { size_t const lastLLSize = iend - anchor;
@@ -1873,7 +1881,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
1873
1881
  const BYTE* const dictEnd = dictBase + dictLimit;
1874
1882
  const BYTE* const iend = istart + srcSize;
1875
1883
  const BYTE* const ilimit = iend - 8;
1876
- U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
1884
+ U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
1877
1885
 
1878
1886
  /* Search Loop */
1879
1887
  while (ip < ilimit) { /* < instead of <=, because (ip+1) */
@@ -1973,7 +1981,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
1973
1981
  } } }
1974
1982
 
1975
1983
  /* save reps for next block */
1976
- ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
1984
+ seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
1977
1985
 
1978
1986
  /* Last Literals */
1979
1987
  { size_t const lastLLSize = iend - anchor;
@@ -2276,7 +2284,7 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
2276
2284
 
2277
2285
  /* Update chains up to ip (excluded)
2278
2286
  Assumption : always within prefix (i.e. not within extDict) */
2279
- FORCE_INLINE
2287
+ FORCE_INLINE_TEMPLATE
2280
2288
  U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
2281
2289
  {
2282
2290
  U32* const hashTable = zc->hashTable;
@@ -2300,7 +2308,7 @@ U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
2300
2308
 
2301
2309
 
2302
2310
  /* inlining is important to hardwire a hot branch (template emulation) */
2303
- FORCE_INLINE
2311
+ FORCE_INLINE_TEMPLATE
2304
2312
  size_t ZSTD_HcFindBestMatch_generic (
2305
2313
  ZSTD_CCtx* zc, /* Index table will be updated */
2306
2314
  const BYTE* const ip, const BYTE* const iLimit,
@@ -2352,7 +2360,7 @@ size_t ZSTD_HcFindBestMatch_generic (
2352
2360
  }
2353
2361
 
2354
2362
 
2355
- FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
2363
+ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
2356
2364
  ZSTD_CCtx* zc,
2357
2365
  const BYTE* ip, const BYTE* const iLimit,
2358
2366
  size_t* offsetPtr,
@@ -2369,7 +2377,7 @@ FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
2369
2377
  }
2370
2378
 
2371
2379
 
2372
- FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
2380
+ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
2373
2381
  ZSTD_CCtx* zc,
2374
2382
  const BYTE* ip, const BYTE* const iLimit,
2375
2383
  size_t* offsetPtr,
@@ -2389,7 +2397,7 @@ FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
 /* *******************************
 *  Common parser - lazy strategy
 *********************************/
-FORCE_INLINE
+FORCE_INLINE_TEMPLATE
 void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize,
                                      const U32 searchMethod, const U32 depth)
@@ -2409,7 +2417,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
                         size_t* offsetPtr,
                         U32 maxNbAttempts, U32 matchLengthSearch);
     searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
-    U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0;
+    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
 
     /* init */
     ip += (ip==base);
@@ -2519,8 +2527,8 @@ _storeSequence:
     }   }
 
     /* Save reps for next block */
-    ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
-    ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
+    seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+    seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
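A note on the `offset_1 ? offset_1 : savedOffset` pattern preserved by this hunk. The reading below is an assumption drawn from the block-init code, which sits outside this hunk: during init, a repeat offset larger than the available history is parked in savedOffset and its working copy zeroed; if the block finishes without ever producing a replacement, the parked value is restored so the next block resumes from the original history. A minimal model of that confirmation step:

/* working == 0 means the offset never became usable within this block */
static unsigned confirm_rep(unsigned working, unsigned parked)
{
    return working ? working : parked;
}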
@@ -2551,7 +2559,7 @@ static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t sr
 }
 
 
-FORCE_INLINE
+FORCE_INLINE_TEMPLATE
 void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize,
                                      const U32 searchMethod, const U32 depth)
@@ -2578,7 +2586,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                         U32 maxNbAttempts, U32 matchLengthSearch);
     searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
 
-    U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
 
     /* init */
     ctx->nextToUpdate3 = ctx->nextToUpdate;
@@ -2714,7 +2722,7 @@ _storeSequence:
     }   }
 
     /* Save reps for next block */
-    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
+    seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -2823,7 +2831,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCa
         if (current > zc->nextToUpdate + 384)
             zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* limited update after finding a very long match */
     blockCompressor(zc, src, srcSize);
-    return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
+    return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
 }
 
 
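The new call signature above is the core of this refactor: ZSTD_compressSequences no longer receives the whole ZSTD_CCtx, only the three pieces it reads or updates. A hedged sketch of the same pattern; every type and name below is an illustrative stand-in, not zstd's:

#include <stddef.h>

typedef struct { unsigned nbSeq; }       SeqSectionT;   /* stands in for seqStore_t     */
typedef struct { unsigned tablesValid; } EntropyStateT; /* stands in for entropy tables */
typedef struct { int searchLog; }        CParamsLikeT;  /* stands in for cParams        */

/* after the refactor: dependencies are explicit in the prototype */
static size_t emitSequences(const SeqSectionT* seqs, EntropyStateT* ent,
                            const CParamsLikeT* cp,
                            void* dst, size_t dstCapacity, size_t srcSize)
{
    /* placeholder body: a real emitter entropy-codes seqs into dst */
    (void)seqs; (void)ent; (void)cp; (void)dst; (void)dstCapacity; (void)srcSize;
    return 0;
}

typedef struct {
    SeqSectionT    seqStore;
    EntropyStateT* entropy;
    CParamsLikeT   cParams;
    /* ...many fields the emitter never touches... */
} CtxLikeT;

static size_t compressBlockLike(CtxLikeT* ctx, void* dst, size_t cap, size_t srcSize)
{
    /* mirrors the "+" line above: pass narrow views, not the context */
    return emitSequences(&ctx->seqStore, ctx->entropy, &ctx->cParams,
                         dst, cap, srcSize);
}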
@@ -3000,7 +3008,6 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
         return fhSize;
     }
 
-
 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
@@ -3106,13 +3113,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
     const BYTE* const dictEnd = dictPtr + dictSize;
     short offcodeNCount[MaxOff+1];
     unsigned offcodeMaxValue = MaxOff;
-    BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+    ZSTD_STATIC_ASSERT(sizeof(cctx->entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
 
     dictPtr += 4;   /* skip magic number */
     cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
     dictPtr += 4;
 
-    { size_t const hufHeaderSize = HUF_readCTable(cctx->hufCTable, 255, dictPtr, dictEnd-dictPtr);
+    { size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, 255, dictPtr, dictEnd-dictPtr);
       if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
       dictPtr += hufHeaderSize;
     }
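This hunk trades a per-call stack buffer for the shared cctx->entropy->workspace, guarded by a compile-time size check. A sketch of the guard, assuming ZSTD_STATIC_ASSERT uses the classic divide-by-zero-in-enum trick; MY_STATIC_ASSERT and EntropyTablesLike are illustrative:

#define MY_STATIC_ASSERT(c) { enum { my_static_assertion = 1/(int)(!!(c)) }; }

typedef struct { unsigned workspace[256]; } EntropyTablesLike;

static void useWorkspace(EntropyTablesLike* e)
{
    /* compilation fails here if the workspace ever shrinks below what
       FSE table construction needs */
    MY_STATIC_ASSERT(sizeof(e->workspace) >= 256 * sizeof(unsigned));
    (void)e;
}

The benefit is twofold: it removes a buffer from the stack of the dictionary-loading path, and the assert keeps the substitution sound if the workspace's size and FSE's requirements ever drift apart.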
@@ -3122,7 +3130,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
-        CHECK_E( FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)),
+        CHECK_E( FSE_buildCTable_wksp(cctx->entropy->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += offcodeHeaderSize;
     }
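CHECK_E and CHECK_F, used throughout this function, are zstd's error-propagation macros: evaluate an expression and, on failure, return early, either forwarding the original code (CHECK_F) or substituting a specific one such as dictionary_corrupted (CHECK_E). A hedged reimplementation of the idiom; the MY_ names and the numeric error constant are illustrative:

#include <stddef.h>

/* zstd encodes failures as size_t values near (size_t)-1 */
static int my_isError(size_t code) { return code > (size_t)-64; }

#define MY_CHECK_F(f) \
    do { size_t const e_ = (f); if (my_isError(e_)) return e_; } while (0)
#define MY_CHECK_E(f, errcode) \
    do { if (my_isError(f)) return (size_t)0 - (errcode); } while (0)

static size_t buildTable(void) { return 0; }   /* hypothetical fallible step */

static size_t loadSection(void)
{
    MY_CHECK_E(buildTable(), 30);   /* 30: stand-in numeric error code */
    return 0;
}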
@@ -3134,7 +3142,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
         /* Every match length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
-        CHECK_E( FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)),
+        CHECK_E( FSE_buildCTable_wksp(cctx->entropy->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += matchlengthHeaderSize;
     }
@@ -3146,15 +3154,15 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
         /* Every literal length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
-        CHECK_E( FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)),
+        CHECK_E( FSE_buildCTable_wksp(cctx->entropy->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += litlengthHeaderSize;
     }
 
     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    cctx->rep[0] = MEM_readLE32(dictPtr+0);
-    cctx->rep[1] = MEM_readLE32(dictPtr+4);
-    cctx->rep[2] = MEM_readLE32(dictPtr+8);
+    cctx->seqStore.rep[0] = MEM_readLE32(dictPtr+0);
+    cctx->seqStore.rep[1] = MEM_readLE32(dictPtr+4);
+    cctx->seqStore.rep[2] = MEM_readLE32(dictPtr+8);
     dictPtr += 12;
 
     {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
@@ -3168,12 +3176,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
         /* All repCodes must be <= dictContentSize and != 0*/
         {   U32 u;
             for (u=0; u<3; u++) {
-                if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted);
-                if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+                if (cctx->seqStore.rep[u] == 0) return ERROR(dictionary_corrupted);
+                if (cctx->seqStore.rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
         }   }
 
-        cctx->fseCTables_ready = 1;
-        cctx->hufCTable_repeatMode = HUF_repeat_valid;
+        cctx->entropy->hufCTable_repeatMode = HUF_repeat_valid;
+        cctx->entropy->offcode_repeatMode = FSE_repeat_valid;
+        cctx->entropy->matchlength_repeatMode = FSE_repeat_valid;
+        cctx->entropy->litlength_repeatMode = FSE_repeat_valid;
         return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
     }
 }
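To close out the dictionary loader: the three repeat offsets stored in a zstd dictionary are little-endian U32s, and each must be non-zero and no larger than the dictionary content, otherwise a back-reference could point outside known data. A self-contained sketch of that read-and-validate step; readLE32 mirrors MEM_readLE32's contract, and loadReps is an illustrative name:

#include <stdint.h>
#include <stddef.h>

static uint32_t readLE32(const unsigned char* p)
{
    return (uint32_t)p[0]         | ((uint32_t)p[1] <<  8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int loadReps(uint32_t rep[3], const unsigned char* p, size_t contentSize)
{
    int u;
    for (u = 0; u < 3; u++) {
        rep[u] = readLE32(p + 4*u);
        if (rep[u] == 0 || rep[u] > contentSize)
            return -1;   /* dictionary_corrupted */
    }
    return 0;
}

Note also that the final change in the hunk replaces the single fseCTables_ready flag with one repeat-mode field per table (offcode, matchlength, litlength, alongside the existing Huffman one), so each table's reusability is now tracked independently.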