zstd-ruby 1.3.8.0 → 1.4.0.0

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +6 -5
  3. data/README.md +1 -1
  4. data/ext/zstdruby/libzstd/Makefile +7 -3
  5. data/ext/zstdruby/libzstd/README.md +4 -2
  6. data/ext/zstdruby/libzstd/common/compiler.h +1 -1
  7. data/ext/zstdruby/libzstd/common/fse.h +1 -1
  8. data/ext/zstdruby/libzstd/common/threading.c +2 -2
  9. data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
  10. data/ext/zstdruby/libzstd/common/zstd_internal.h +55 -2
  11. data/ext/zstdruby/libzstd/compress/fse_compress.c +2 -2
  12. data/ext/zstdruby/libzstd/compress/zstd_compress.c +423 -296
  13. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +14 -11
  14. data/ext/zstdruby/libzstd/compress/zstd_fast.c +203 -124
  15. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +1 -1
  16. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +1 -1
  17. data/ext/zstdruby/libzstd/compress/zstd_opt.c +27 -11
  18. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +41 -49
  19. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +43 -26
  20. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
  21. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +257 -164
  22. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +51 -47
  23. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +7 -0
  24. data/ext/zstdruby/libzstd/dictBuilder/cover.c +58 -13
  25. data/ext/zstdruby/libzstd/dictBuilder/cover.h +29 -0
  26. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +25 -13
  27. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +18 -8
  28. data/ext/zstdruby/libzstd/dll/example/build_package.bat +3 -2
  29. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +42 -12
  30. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +32 -7
  31. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +12 -7
  32. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +31 -12
  33. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +12 -7
  34. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +32 -12
  35. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +12 -7
  36. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +32 -12
  37. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +12 -7
  38. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +32 -7
  39. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +12 -7
  40. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +36 -8
  41. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +10 -5
  42. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +40 -9
  43. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +10 -5
  44. data/ext/zstdruby/libzstd/zstd.h +689 -542
  45. data/lib/zstd-ruby/version.rb +1 -1
  46. data/zstd-ruby.gemspec +1 -1
  47. metadata +6 -7
  48. data/ext/zstdruby/libzstd/dll/libzstd.def +0 -87
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: '09569e18846a17f43eb0a6b5cfad3b2b2f6839e725dea1f9470ec3a12335a54f'
- data.tar.gz: 6f7820cc8eda43d8e0e5de6cb9f9b40dafee7a63de5f78c645bc523d67232c72
+ metadata.gz: 3d3a2e484e068296e89977e4bda01284174aeb6738d7ca0b81ab286abcd3b3be
+ data.tar.gz: 736d1180bf2aaa4f958ced167b133810dabb5b9dc8eae969a4385aac96776300
  SHA512:
- metadata.gz: 35e224f5a80c71a84accbd6ad8973b7718bc424341edb4c9421bf4af97c9e79492e2d536ebcd940c14d2c538adf79e0e51934557764a7cc1b0c62c22d6f40a6a
- data.tar.gz: 2e89cc6b4ab292b72abed39fbe09dab3d3254aa5f4a92a5fdf155af325540a354ceccca7359c33f39ef8c15c44c4d2896ffb07955789f011244bc200be67f172
+ metadata.gz: e4bba80e9b2e894bd3d9e734cd4d332073f7ab583f2a45e096a7e7eb015eca7a13be32790a88c6fa1b132977b22278f4e86c2d8a67af0188ce74de2fc84cf98d
+ data.tar.gz: 7759f59d24166a83c36822934de188b18fb6b7674ab1990da1ec7985c1fdb95bc020c6b40de2de987026553a3947dcb77dc110662d6db3a1504118afbc8f73f7
data/.travis.yml CHANGED
@@ -1,13 +1,14 @@
  sudo: false
  language: ruby
  rvm:
- - 2.6.0
- - 2.5.3
- - 2.4.5
- - 2.3.8
+ - 2.6
+ - 2.5
+ - 2.4
+ - 2.3
  - 2.2

- before_install: gem install bundler -v 1.14.3
+ before_install:
+ - gem install bundler -v 1.14.3

  before_script:
  - bundle exec rake compile
data/README.md CHANGED
@@ -10,7 +10,7 @@ See https://github.com/facebook/zstd
  Fork from https://github.com/jarredholman/ruby-zstd.

  ## Zstd version
- v1.3.8 (https://github.com/facebook/zstd/tree/v1.3.8)
+ v1.4.0 (https://github.com/facebook/zstd/tree/v1.4.0)

  ## Installation

data/ext/zstdruby/libzstd/Makefile CHANGED
@@ -25,7 +25,7 @@ endif
  CFLAGS ?= -O3
  DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
  -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
- -Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
+ -Wstrict-prototypes -Wundef -Wpointer-arith \
  -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
  -Wredundant-decls -Wmissing-prototypes -Wc++-compat
  CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
@@ -56,6 +56,7 @@ ZSTD_FORCE_DECOMPRESS_SHORT ?= 0
  ZSTD_FORCE_DECOMPRESS_LONG ?= 0
  ZSTD_NO_INLINE ?= 0
  ZSTD_STRIP_ERROR_STRINGS ?= 0
+ ZSTD_LEGACY_MULTITHREADED_API ?= 0

  ifeq ($(ZSTD_LIB_COMPRESSION), 0)
  ZSTD_LIB_DICTBUILDER = 0
@@ -107,6 +108,10 @@ ifneq ($(ZSTD_STRIP_ERROR_STRINGS), 0)
  CFLAGS += -DZSTD_STRIP_ERROR_STRINGS
  endif

+ ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0)
+ CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API
+ endif
+
  ifneq ($(ZSTD_LEGACY_SUPPORT), 0)
  ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0)
  ZSTD_FILES += $(shell ls legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]')
@@ -151,8 +156,7 @@ ifneq (,$(filter Windows%,$(OS)))
  LIBZSTD = dll\libzstd.dll
  $(LIBZSTD): $(ZSTD_FILES)
  @echo compiling dynamic library $(LIBVER)
- @$(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -shared $^ -o $@
- dlltool -D $@ -d dll\libzstd.def -l dll\libzstd.lib
+ $(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -Wl,--out-implib,dll\libzstd.lib -shared $^ -o $@

  else

data/ext/zstdruby/libzstd/README.md CHANGED
@@ -31,8 +31,6 @@ note that it's necessary to request the `-pthread` flag during link stage.

  Multithreading capabilities are exposed
  via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.3.8/lib/zstd.h#L592).
- This API is still labelled experimental,
- but is expected to become "stable" in the near future.


  #### API
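The deleted caveat reflects that the multithreaded compression parameters are treated as stable from v1.4.0 onward. A minimal sketch of driving them through the advanced API (not part of this diff; it assumes libzstd was built with `ZSTD_MULTITHREAD`, otherwise setting `ZSTD_c_nbWorkers` to a non-zero value fails):

```c
#include <zstd.h>   /* ZSTD_CCtx_setParameter / ZSTD_compress2 are stable as of v1.4.0 */

/* Compress `src` with 4 worker threads; returns a compressed size or an error code. */
static size_t compress_mt(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize = (size_t)-1;                               /* fallback if allocation fails */
    if (cctx != NULL) {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* 0 = single-threaded mode */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
    }
    return cSize;                                            /* check with ZSTD_isError() */
}
```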
@@ -110,6 +108,10 @@ The file structure is designed to make this selection manually achievable for an
  which removes the error messages that are otherwise returned by
  `ZSTD_getErrorName`.

+ - While invoking `make libzstd`, the build macro `ZSTD_LEGACY_MULTITHREADED_API=1`
+ will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in
+ the shared library, which is now hidden by default.
+

  #### Windows : using MinGW+MSYS to create DLL

data/ext/zstdruby/libzstd/common/compiler.h CHANGED
@@ -40,7 +40,7 @@

  /**
  * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to elimininate the constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
  * branches.
  */
  #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
data/ext/zstdruby/libzstd/common/fse.h CHANGED
@@ -358,7 +358,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
  typedef enum {
  FSE_repeat_none, /**< Cannot use the previous table */
  FSE_repeat_check, /**< Can use the previous table but it must be checked */
- FSE_repeat_valid /**< Can use the previous table and it is asumed to be valid */
+ FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
  } FSE_repeat;

  /* *****************************************
data/ext/zstdruby/libzstd/common/threading.c CHANGED
@@ -14,8 +14,8 @@
  * This file will hold wrapper for systems, which do not support pthreads
  */

- /* create fake symbol to avoid empty trnaslation unit warning */
- int g_ZSTD_threading_useles_symbol;
+ /* create fake symbol to avoid empty translation unit warning */
+ int g_ZSTD_threading_useless_symbol;

  #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

data/ext/zstdruby/libzstd/common/xxhash.c CHANGED
@@ -66,10 +66,10 @@
  /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */

  /*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
  * Results are therefore identical for little-endian and big-endian CPU.
  * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
  * to improve speed for Big-endian CPU.
  * This option has no impact on Little_Endian CPU.
  */
data/ext/zstdruby/libzstd/common/zstd_internal.h CHANGED
@@ -53,8 +53,50 @@ extern "C" {
  #undef MAX
  #define MIN(a,b) ((a)<(b) ? (a) : (b))
  #define MAX(a,b) ((a)>(b) ? (a) : (b))
- #define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */
- #define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */
+
+ /**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information. In order to do that
+ * (particularly, printing the conditional that failed), this can't just wrap
+ * RETURN_ERROR().
+ */
+ #define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+ /**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+ #define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0);
+
+ /**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+ #define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0);


  /*-*************************************
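The `CHECK_F` / `CHECK_E` helpers are superseded by `RETURN_ERROR_IF`, `RETURN_ERROR` and `FORWARD_IF_ERROR`, which additionally log the failing condition, file and line at debug level 3. An illustrative sketch of how call sites use them; both functions below are hypothetical, only the macros and the error codes are real:

```c
static size_t example_checkedCopy(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
{
    /* guard clause: returns ERROR(dstSize_tooSmall) and logs the check in debug builds */
    RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall,
                    "need %u bytes, only %u available",
                    (unsigned)srcSize, (unsigned)dstCapacity);
    memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t example_caller(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    /* forwards any error code from the nested call, where CHECK_F() was used before */
    FORWARD_IF_ERROR(example_checkedCopy(dst, dstCapacity, src, srcSize));
    return 0;
}
```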
@@ -200,6 +242,17 @@ typedef struct {
  U32 longLengthPos;
  } seqStore_t;

+ /**
+ * Contains the compressed frame size and an upper-bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ * similarly, before using `decompressedBound`, check for errors using:
+ * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+ typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+ } ZSTD_frameSizeInfo; /* decompress & legacy */
+
  const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
  void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
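A short sketch of how a consumer is expected to validate a `ZSTD_frameSizeInfo`, following the comment above: `compressedSize` is checked with `ZSTD_isError()`, `decompressedBound` against `ZSTD_CONTENTSIZE_ERROR`. The struct is internal, so the producer is left abstract here:

```c
#include <stdio.h>

/* `info` is assumed to have been filled by one of the internal frame-inspection helpers. */
static int example_reportFrame(ZSTD_frameSizeInfo info)
{
    if (ZSTD_isError(info.compressedSize)) return -1;             /* invalid or corrupt frame */
    if (info.decompressedBound == ZSTD_CONTENTSIZE_ERROR) return -1;
    printf("frame: %zu compressed bytes, decompresses to at most %llu bytes\n",
           info.compressedSize, info.decompressedBound);
    return 0;
}
```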

data/ext/zstdruby/libzstd/compress/fse_compress.c CHANGED
@@ -129,9 +129,9 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
  { U32 position = 0;
  U32 symbol;
  for (symbol=0; symbol<=maxSymbolValue; symbol++) {
- int nbOccurences;
+ int nbOccurrences;
  int const freq = normalizedCounter[symbol];
- for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
  tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
  position = (position + step) & tableMask;
  while (position > highThreshold)
data/ext/zstdruby/libzstd/compress/zstd_compress.c CHANGED
@@ -103,12 +103,31 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
  return cctx;
  }

+ /**
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+ static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+ {
+ ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+ }
+
+ static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+ {
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+ }
+
  static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
  {
  assert(cctx != NULL);
  assert(cctx->staticSize == 0);
  ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
- ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
+ ZSTD_clearAllDicts(cctx);
  #ifdef ZSTD_MULTITHREAD
  ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
  #endif
@@ -117,7 +136,8 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
  size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
  {
  if (cctx==NULL) return 0; /* support free on NULL */
- if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
  ZSTD_freeCCtxContent(cctx);
  ZSTD_free(cctx, cctx->customMem);
  return 0;
@@ -139,7 +159,7 @@ size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
  {
  if (cctx==NULL) return 0; /* support sizeof on NULL */
  return sizeof(*cctx) + cctx->workSpaceSize
- + ZSTD_sizeof_CDict(cctx->cdictLocal)
+ + ZSTD_sizeof_localDict(cctx->localDict)
  + ZSTD_sizeof_mtctx(cctx);
  }

@@ -195,7 +215,7 @@ size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
195
215
  }
196
216
 
197
217
  size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
198
- if (!cctxParams) { return ERROR(GENERIC); }
218
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
199
219
  memset(cctxParams, 0, sizeof(*cctxParams));
200
220
  cctxParams->compressionLevel = compressionLevel;
201
221
  cctxParams->fParams.contentSizeFlag = 1;
@@ -204,8 +224,8 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
204
224
 
205
225
  size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
206
226
  {
207
- if (!cctxParams) { return ERROR(GENERIC); }
208
- CHECK_F( ZSTD_checkCParams(params.cParams) );
227
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
228
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
209
229
  memset(cctxParams, 0, sizeof(*cctxParams));
210
230
  cctxParams->cParams = params.cParams;
211
231
  cctxParams->fParams = params.fParams;
@@ -359,6 +379,12 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
  bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */
  return bounds;

+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+ bounds.lowerBound = ZSTD_lcm_auto;
+ bounds.upperBound = ZSTD_lcm_uncompressed;
+ return bounds;
+
  default:
  { ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
  return boundError;
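`ZSTD_c_literalCompressionMode` (with the `ZSTD_lcm_auto` / `ZSTD_lcm_huffman` / `ZSTD_lcm_uncompressed` values bounded above) lets a caller force or disable Huffman compression of literals. A hedged sketch of setting it from application code; in v1.4.0 the parameter and the enum live in the experimental section of `zstd.h`, so the snippet assumes `ZSTD_STATIC_LINKING_ONLY`:

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_literalCompressionMode is experimental in v1.4.0 */
#include <zstd.h>

static void example_disableLiteralCompression(ZSTD_CCtx* cctx)
{
    /* ZSTD_lcm_auto (the default) lets the library decide; ZSTD_lcm_uncompressed
     * stores literals raw, trading compression ratio for speed. */
    size_t const err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode,
                                              ZSTD_lcm_uncompressed);
    (void)err;   /* real code should check the result with ZSTD_isError() */
}
```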
@@ -378,10 +404,22 @@ static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
  return 1;
  }

- #define BOUNDCHECK(cParam, val) { \
- if (!ZSTD_cParam_withinBounds(cParam,val)) { \
- return ERROR(parameter_outOfBound); \
- } }
+ /* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+ {
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+ }
+
+ #define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound); \
+ }


  static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
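`ZSTD_cParam_clampBounds()` is internal, but it mirrors what applications can already do with the public `ZSTD_cParam_getBounds()` that these bounds feed. A small sketch (not from the diff):

```c
#include <zstd.h>

/* Clamp a user-supplied value into the advertised range for a parameter,
 * the same way ZSTD_cParam_clampBounds() does inside the library. */
static int example_clampParam(ZSTD_cParameter param, int value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(param);
    if (ZSTD_isError(bounds.error)) return value;   /* unknown parameter: leave unchanged */
    if (value < bounds.lowerBound) value = bounds.lowerBound;
    if (value > bounds.upperBound) value = bounds.upperBound;
    return value;
}
```

For example, `example_clampParam(ZSTD_c_compressionLevel, 1000)` comes back as `ZSTD_maxCLevel()`.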
@@ -413,6 +451,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
413
451
  case ZSTD_c_ldmBucketSizeLog:
414
452
  case ZSTD_c_ldmHashRateLog:
415
453
  case ZSTD_c_forceAttachDict:
454
+ case ZSTD_c_literalCompressionMode:
416
455
  default:
417
456
  return 0;
418
457
  }
@@ -425,18 +464,17 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
425
464
  if (ZSTD_isUpdateAuthorized(param)) {
426
465
  cctx->cParamsChanged = 1;
427
466
  } else {
428
- return ERROR(stage_wrong);
467
+ RETURN_ERROR(stage_wrong);
429
468
  } }
430
469
 
431
470
  switch(param)
432
471
  {
433
- case ZSTD_c_format :
434
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
472
+ case ZSTD_c_nbWorkers:
473
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
474
+ "MT not compatible with static alloc");
475
+ break;
435
476
 
436
477
  case ZSTD_c_compressionLevel:
437
- if (cctx->cdict) return ERROR(stage_wrong);
438
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
439
-
440
478
  case ZSTD_c_windowLog:
441
479
  case ZSTD_c_hashLog:
442
480
  case ZSTD_c_chainLog:
@@ -444,49 +482,32 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
444
482
  case ZSTD_c_minMatch:
445
483
  case ZSTD_c_targetLength:
446
484
  case ZSTD_c_strategy:
447
- if (cctx->cdict) return ERROR(stage_wrong);
448
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
449
-
485
+ case ZSTD_c_ldmHashRateLog:
486
+ case ZSTD_c_format:
450
487
  case ZSTD_c_contentSizeFlag:
451
488
  case ZSTD_c_checksumFlag:
452
489
  case ZSTD_c_dictIDFlag:
453
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
454
-
455
- case ZSTD_c_forceMaxWindow : /* Force back-references to remain < windowSize,
456
- * even when referencing into Dictionary content.
457
- * default : 0 when using a CDict, 1 when using a Prefix */
458
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
459
-
490
+ case ZSTD_c_forceMaxWindow:
460
491
  case ZSTD_c_forceAttachDict:
461
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
462
-
463
- case ZSTD_c_nbWorkers:
464
- if ((value!=0) && cctx->staticSize) {
465
- return ERROR(parameter_unsupported); /* MT not compatible with static alloc */
466
- }
467
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
468
-
492
+ case ZSTD_c_literalCompressionMode:
469
493
  case ZSTD_c_jobSize:
470
494
  case ZSTD_c_overlapLog:
471
495
  case ZSTD_c_rsyncable:
472
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
473
-
474
496
  case ZSTD_c_enableLongDistanceMatching:
475
497
  case ZSTD_c_ldmHashLog:
476
498
  case ZSTD_c_ldmMinMatch:
477
499
  case ZSTD_c_ldmBucketSizeLog:
478
- case ZSTD_c_ldmHashRateLog:
479
- if (cctx->cdict) return ERROR(stage_wrong);
480
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
500
+ break;
481
501
 
482
- default: return ERROR(parameter_unsupported);
502
+ default: RETURN_ERROR(parameter_unsupported);
483
503
  }
504
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
484
505
  }
485
506
 
486
- size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
487
- ZSTD_cParameter param, int value)
507
+ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
508
+ ZSTD_cParameter param, int value)
488
509
  {
489
- DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%i, %i)", (int)param, value);
510
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
490
511
  switch(param)
491
512
  {
492
513
  case ZSTD_c_format :
@@ -495,11 +516,9 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
495
516
  return (size_t)CCtxParams->format;
496
517
 
497
518
  case ZSTD_c_compressionLevel : {
498
- int cLevel = value;
499
- if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
500
- if (cLevel < ZSTD_minCLevel()) cLevel = ZSTD_minCLevel();
501
- if (cLevel) { /* 0 : does not change current level */
502
- CCtxParams->compressionLevel = cLevel;
519
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
520
+ if (value) { /* 0 : does not change current level */
521
+ CCtxParams->compressionLevel = value;
503
522
  }
504
523
  if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
505
524
  return 0; /* return type (size_t) cannot represent negative values */
@@ -573,33 +592,55 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
573
592
  return CCtxParams->attachDictPref;
574
593
  }
575
594
 
595
+ case ZSTD_c_literalCompressionMode : {
596
+ const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
597
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
598
+ CCtxParams->literalCompressionMode = lcm;
599
+ return CCtxParams->literalCompressionMode;
600
+ }
601
+
576
602
  case ZSTD_c_nbWorkers :
577
603
  #ifndef ZSTD_MULTITHREAD
578
- if (value!=0) return ERROR(parameter_unsupported);
604
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
579
605
  return 0;
580
606
  #else
581
- return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
607
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
608
+ CCtxParams->nbWorkers = value;
609
+ return CCtxParams->nbWorkers;
582
610
  #endif
583
611
 
584
612
  case ZSTD_c_jobSize :
585
613
  #ifndef ZSTD_MULTITHREAD
586
- return ERROR(parameter_unsupported);
614
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
615
+ return 0;
587
616
  #else
588
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
617
+ /* Adjust to the minimum non-default value. */
618
+ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
619
+ value = ZSTDMT_JOBSIZE_MIN;
620
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
621
+ assert(value >= 0);
622
+ CCtxParams->jobSize = value;
623
+ return CCtxParams->jobSize;
589
624
  #endif
590
625
 
591
626
  case ZSTD_c_overlapLog :
592
627
  #ifndef ZSTD_MULTITHREAD
593
- return ERROR(parameter_unsupported);
628
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
629
+ return 0;
594
630
  #else
595
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
631
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
632
+ CCtxParams->overlapLog = value;
633
+ return CCtxParams->overlapLog;
596
634
  #endif
597
635
 
598
636
  case ZSTD_c_rsyncable :
599
637
  #ifndef ZSTD_MULTITHREAD
600
- return ERROR(parameter_unsupported);
638
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
639
+ return 0;
601
640
  #else
602
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
641
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
642
+ CCtxParams->rsyncable = value;
643
+ return CCtxParams->rsyncable;
603
644
  #endif
604
645
 
605
646
  case ZSTD_c_enableLongDistanceMatching :
@@ -625,21 +666,21 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
625
666
  return CCtxParams->ldmParams.bucketSizeLog;
626
667
 
627
668
  case ZSTD_c_ldmHashRateLog :
628
- if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
629
- return ERROR(parameter_outOfBound);
669
+ RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
670
+ parameter_outOfBound);
630
671
  CCtxParams->ldmParams.hashRateLog = value;
631
672
  return CCtxParams->ldmParams.hashRateLog;
632
673
 
633
- default: return ERROR(parameter_unsupported);
674
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
634
675
  }
635
676
  }
636
677
 
637
678
  size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
638
679
  {
639
- return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
680
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
640
681
  }
641
682
 
642
- size_t ZSTD_CCtxParam_getParameter(
683
+ size_t ZSTD_CCtxParams_getParameter(
643
684
  ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
644
685
  {
645
686
  switch(param)
@@ -686,6 +727,9 @@ size_t ZSTD_CCtxParam_getParameter(
686
727
  case ZSTD_c_forceAttachDict :
687
728
  *value = CCtxParams->attachDictPref;
688
729
  break;
730
+ case ZSTD_c_literalCompressionMode :
731
+ *value = CCtxParams->literalCompressionMode;
732
+ break;
689
733
  case ZSTD_c_nbWorkers :
690
734
  #ifndef ZSTD_MULTITHREAD
691
735
  assert(CCtxParams->nbWorkers == 0);
@@ -694,7 +738,7 @@ size_t ZSTD_CCtxParam_getParameter(
694
738
  break;
695
739
  case ZSTD_c_jobSize :
696
740
  #ifndef ZSTD_MULTITHREAD
697
- return ERROR(parameter_unsupported);
741
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
698
742
  #else
699
743
  assert(CCtxParams->jobSize <= INT_MAX);
700
744
  *value = (int)CCtxParams->jobSize;
@@ -702,14 +746,14 @@ size_t ZSTD_CCtxParam_getParameter(
702
746
  #endif
703
747
  case ZSTD_c_overlapLog :
704
748
  #ifndef ZSTD_MULTITHREAD
705
- return ERROR(parameter_unsupported);
749
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
706
750
  #else
707
751
  *value = CCtxParams->overlapLog;
708
752
  break;
709
753
  #endif
710
754
  case ZSTD_c_rsyncable :
711
755
  #ifndef ZSTD_MULTITHREAD
712
- return ERROR(parameter_unsupported);
756
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
713
757
  #else
714
758
  *value = CCtxParams->rsyncable;
715
759
  break;
@@ -729,7 +773,7 @@ size_t ZSTD_CCtxParam_getParameter(
729
773
  case ZSTD_c_ldmHashRateLog :
730
774
  *value = CCtxParams->ldmParams.hashRateLog;
731
775
  break;
732
- default: return ERROR(parameter_unsupported);
776
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
733
777
  }
734
778
  return 0;
735
779
  }
@@ -745,8 +789,8 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
  ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
  {
  DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- if (cctx->cdict) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong);

  cctx->requestedParams = *params;
  return 0;
@@ -755,33 +799,71 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
  ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
  {
  DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
  cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
  return 0;
  }

+ /**
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+ {
+ ZSTD_localDict* const dl = &cctx->localDict;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctx->requestedParams, 0, dl->dictSize);
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ cParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation);
+ cctx->cdict = dl->cdict;
+ return 0;
+ }
+
  size_t ZSTD_CCtx_loadDictionary_advanced(
  ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
  ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
  {
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
  DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
- ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */
- if (dict==NULL || dictSize==0) { /* no dictionary mode */
- cctx->cdictLocal = NULL;
- cctx->cdict = NULL;
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
  } else {
- ZSTD_compressionParameters const cParams =
- ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
- cctx->cdictLocal = ZSTD_createCDict_advanced(
- dict, dictSize,
- dictLoadMethod, dictContentType,
- cParams, cctx->customMem);
- cctx->cdict = cctx->cdictLocal;
- if (cctx->cdictLocal == NULL)
- return ERROR(memory_allocation);
+ void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation);
+ memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
  }
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
  return 0;
  }
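The rewritten `ZSTD_CCtx_loadDictionary_advanced()` only records (or copies) the dictionary bytes; building the `ZSTD_CDict` is deferred to `ZSTD_initLocalDict()`, so the compression parameters chosen after loading are the ones actually applied. From the application side the entry points are unchanged; a minimal sketch using the public API (not part of the diff):

```c
#include <zstd.h>

/* Compress with a dictionary loaded into the CCtx. */
static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize)
{
    /* Copies `dict` into the CCtx (ZSTD_CCtx_loadDictionary_byReference() avoids
     * the copy); the CDict itself is now built lazily at compression time. */
    size_t const r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}
```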
@@ -801,9 +883,10 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s
801
883
 
802
884
  size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
803
885
  {
804
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
886
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
887
+ /* Free the existing local cdict (if any) to save memory. */
888
+ ZSTD_clearAllDicts(cctx);
805
889
  cctx->cdict = cdict;
806
- memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */
807
890
  return 0;
808
891
  }
809
892
 
@@ -815,8 +898,8 @@ size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSiz
815
898
  size_t ZSTD_CCtx_refPrefix_advanced(
816
899
  ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
817
900
  {
818
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
819
- cctx->cdict = NULL; /* prefix discards any prior cdict */
901
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
902
+ ZSTD_clearAllDicts(cctx);
820
903
  cctx->prefixDict.dict = prefix;
821
904
  cctx->prefixDict.dictSize = prefixSize;
822
905
  cctx->prefixDict.dictContentType = dictContentType;
@@ -834,8 +917,8 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
834
917
  }
835
918
  if ( (reset == ZSTD_reset_parameters)
836
919
  || (reset == ZSTD_reset_session_and_parameters) ) {
837
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
838
- cctx->cdict = NULL;
920
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
921
+ ZSTD_clearAllDicts(cctx);
839
922
  return ZSTD_CCtxParams_reset(&cctx->requestedParams);
840
923
  }
841
924
  return 0;
@@ -888,10 +971,11 @@ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
888
971
  }
889
972
 
890
973
  /** ZSTD_adjustCParams_internal() :
891
- optimize `cPar` for a given input (`srcSize` and `dictSize`).
892
- mostly downsizing to reduce memory consumption and initialization latency.
893
- Both `srcSize` and `dictSize` are optional (use 0 if unknown).
894
- Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
974
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
975
+ * mostly downsize to reduce memory consumption and initialization latency.
976
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
977
+ * note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
978
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
895
979
  static ZSTD_compressionParameters
896
980
  ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
897
981
  unsigned long long srcSize,
@@ -901,7 +985,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
901
985
  static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
902
986
  assert(ZSTD_checkCParams(cPar)==0);
903
987
 
904
- if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
988
+ if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
905
989
  srcSize = minSrcSize; /* presumed small when there is a dictionary */
906
990
  else if (srcSize == 0)
907
991
  srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */
@@ -922,7 +1006,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
922
1006
  }
923
1007
 
924
1008
  if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
925
- cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
1009
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
926
1010
 
927
1011
  return cPar;
928
1012
  }
@@ -932,7 +1016,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
932
1016
  unsigned long long srcSize,
933
1017
  size_t dictSize)
934
1018
  {
935
- cPar = ZSTD_clampCParams(cPar);
1019
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
936
1020
  return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
937
1021
  }
938
1022
 
@@ -973,8 +1057,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
973
1057
 
974
1058
  size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
975
1059
  {
976
- /* Estimate CCtx size is supported for single-threaded compression only. */
977
- if (params->nbWorkers > 0) { return ERROR(GENERIC); }
1060
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
978
1061
  { ZSTD_compressionParameters const cParams =
979
1062
  ZSTD_getCParamsFromCCtxParams(params, 0, 0);
980
1063
  size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
@@ -1022,10 +1105,12 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel)
1022
1105
 
1023
1106
  size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
1024
1107
  {
1025
- if (params->nbWorkers > 0) { return ERROR(GENERIC); }
1026
- { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
1027
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
1028
- size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
1108
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
1109
+ { ZSTD_compressionParameters const cParams =
1110
+ ZSTD_getCParamsFromCCtxParams(params, 0, 0);
1111
+ size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
1112
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
1113
+ size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
1029
1114
  size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
1030
1115
  size_t const streamingSize = inBuffSize + outBuffSize;
1031
1116
 
@@ -1367,13 +1452,13 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
1367
1452
  DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
1368
1453
  zc->workSpaceSize >> 10,
1369
1454
  neededSpace >> 10);
1370
- /* static cctx : no resize, error out */
1371
- if (zc->staticSize) return ERROR(memory_allocation);
1455
+
1456
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
1372
1457
 
1373
1458
  zc->workSpaceSize = 0;
1374
1459
  ZSTD_free(zc->workSpace, zc->customMem);
1375
1460
  zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
1376
- if (zc->workSpace == NULL) return ERROR(memory_allocation);
1461
+ RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
1377
1462
  zc->workSpaceSize = neededSpace;
1378
1463
  zc->workSpaceOversizedDuration = 0;
1379
1464
 
@@ -1644,7 +1729,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
1644
1729
  ZSTD_buffered_policy_e zbuff)
1645
1730
  {
1646
1731
  DEBUGLOG(5, "ZSTD_copyCCtx_internal");
1647
- if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
1732
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
1648
1733
 
1649
1734
  memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
1650
1735
  { ZSTD_CCtx_params params = dstCCtx->requestedParams;
@@ -1777,7 +1862,8 @@ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
1777
1862
  static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
1778
1863
  {
1779
1864
  U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
1780
- if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1865
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
1866
+ dstSize_tooSmall);
1781
1867
  MEM_writeLE24(dst, cBlockHeader24);
1782
1868
  memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
1783
1869
  return ZSTD_blockHeaderSize + srcSize;
@@ -1788,7 +1874,7 @@ static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void
1788
1874
  BYTE* const ostart = (BYTE* const)dst;
1789
1875
  U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
1790
1876
 
1791
- if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
1877
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
1792
1878
 
1793
1879
  switch(flSize)
1794
1880
  {
@@ -1878,7 +1964,7 @@ static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
1878
1964
  if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
1879
1965
  }
1880
1966
 
1881
- if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
1967
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
1882
1968
  { HUF_repeat repeat = prevHuf->repeatMode;
1883
1969
  int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
1884
1970
  if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
@@ -1960,7 +2046,7 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
1960
2046
  * If x == 0: Return 0
1961
2047
  * Else: Return floor(-log2(x / 256) * 256)
1962
2048
  */
1963
- static unsigned const kInverseProbabiltyLog256[256] = {
2049
+ static unsigned const kInverseProbabilityLog256[256] = {
1964
2050
  0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
1965
2051
  1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
1966
2052
  874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
@@ -1999,7 +2085,7 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
1999
2085
  if (count[s] != 0 && norm == 0)
2000
2086
  norm = 1;
2001
2087
  assert(count[s] < total);
2002
- cost += count[s] * kInverseProbabiltyLog256[norm];
2088
+ cost += count[s] * kInverseProbabilityLog256[norm];
2003
2089
  }
2004
2090
  return cost >> 8;
2005
2091
  }
@@ -2022,7 +2108,7 @@ static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
2022
2108
  unsigned const norm256 = normAcc << shift;
2023
2109
  assert(norm256 > 0);
2024
2110
  assert(norm256 < 256);
2025
- cost += count[s] * kInverseProbabiltyLog256[norm256];
2111
+ cost += count[s] * kInverseProbabilityLog256[norm256];
2026
2112
  }
2027
2113
  return cost >> 8;
2028
2114
  }
@@ -2050,21 +2136,17 @@ static size_t ZSTD_fseBitCost(
2050
2136
  unsigned s;
2051
2137
  FSE_CState_t cstate;
2052
2138
  FSE_initCState(&cstate, ctable);
2053
- if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
2054
- DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
2139
+ RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
2140
+ "Repeat FSE_CTable has maxSymbolValue %u < %u",
2055
2141
  ZSTD_getFSEMaxSymbolValue(ctable), max);
2056
- return ERROR(GENERIC);
2057
- }
2058
2142
  for (s = 0; s <= max; ++s) {
2059
2143
  unsigned const tableLog = cstate.stateLog;
2060
2144
  unsigned const badCost = (tableLog + 1) << kAccuracyLog;
2061
2145
  unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
2062
2146
  if (count[s] == 0)
2063
2147
  continue;
2064
- if (bitCost >= badCost) {
2065
- DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
2066
- return ERROR(GENERIC);
2067
- }
2148
+ RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
2149
+ "Repeat FSE_CTable has Prob[%u] == 0", s);
2068
2150
  cost += count[s] * bitCost;
2069
2151
  }
2070
2152
  return cost >> kAccuracyLog;
@@ -2080,7 +2162,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
2080
2162
  BYTE wksp[FSE_NCOUNTBOUND];
2081
2163
  S16 norm[MaxSeq + 1];
2082
2164
  const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
2083
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
2165
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
2084
2166
  return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
2085
2167
  }
2086
2168
 
@@ -2186,15 +2268,15 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
2186
2268
 
2187
2269
  switch (type) {
2188
2270
  case set_rle:
2189
- CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
2190
- if (dstCapacity==0) return ERROR(dstSize_tooSmall);
2271
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
2272
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
2191
2273
  *op = codeTable[0];
2192
2274
  return 1;
2193
2275
  case set_repeat:
2194
2276
  memcpy(nextCTable, prevCTable, prevCTableSize);
2195
2277
  return 0;
2196
2278
  case set_basic:
2197
- CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
2279
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
2198
2280
  return 0;
2199
2281
  case set_compressed: {
2200
2282
  S16 norm[MaxSeq + 1];
@@ -2205,14 +2287,14 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
2205
2287
  nbSeq_1--;
2206
2288
  }
2207
2289
  assert(nbSeq_1 > 1);
2208
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
2290
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
2209
2291
  { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
2210
- if (FSE_isError(NCountSize)) return NCountSize;
2211
- CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
2292
+ FORWARD_IF_ERROR(NCountSize);
2293
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
2212
2294
  return NCountSize;
2213
2295
  }
2214
2296
  }
2215
- default: return assert(0), ERROR(GENERIC);
2297
+ default: assert(0); RETURN_ERROR(GENERIC);
2216
2298
  }
2217
2299
  }
2218
2300
 
@@ -2229,7 +2311,9 @@ ZSTD_encodeSequences_body(
2229
2311
  FSE_CState_t stateOffsetBits;
2230
2312
  FSE_CState_t stateLitLength;
2231
2313
 
2232
- CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
2314
+ RETURN_ERROR_IF(
2315
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
2316
+ dstSize_tooSmall, "not enough space remaining");
2233
2317
  DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
2234
2318
  (int)(blockStream.endPtr - blockStream.startPtr),
2235
2319
  (unsigned)dstCapacity);
@@ -2303,7 +2387,7 @@ ZSTD_encodeSequences_body(
2303
2387
  FSE_flushCState(&blockStream, &stateLitLength);
2304
2388
 
2305
2389
  { size_t const streamSize = BIT_closeCStream(&blockStream);
2306
- if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
2390
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
2307
2391
  return streamSize;
2308
2392
  }
2309
2393
  }
@@ -2368,6 +2452,21 @@ static size_t ZSTD_encodeSequences(
  sequences, nbSeq, longOffsets);
  }

+ static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+ {
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_lcm_huffman:
+ return 0;
+ case ZSTD_lcm_uncompressed:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ /* fall-through */
+ case ZSTD_lcm_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+ }
+

  /* ZSTD_compressSequences_internal():
  * actually compresses both literals and sequences */
  MEM_STATIC size_t
  /* Compress literals */
2404
2503
  { const BYTE* const literals = seqStorePtr->litStart;
2405
2504
  size_t const litSize = seqStorePtr->lit - literals;
2406
- int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
2407
2505
  size_t const cSize = ZSTD_compressLiterals(
2408
2506
  &prevEntropy->huf, &nextEntropy->huf,
2409
- cctxParams->cParams.strategy, disableLiteralCompression,
2507
+ cctxParams->cParams.strategy,
2508
+ ZSTD_disableLiteralsCompression(cctxParams),
2410
2509
  op, dstCapacity,
2411
2510
  literals, litSize,
2412
2511
  workspace, wkspSize,
2413
2512
  bmi2);
2414
- if (ZSTD_isError(cSize))
2415
- return cSize;
2513
+ FORWARD_IF_ERROR(cSize);
2416
2514
  assert(cSize <= dstCapacity);
2417
2515
  op += cSize;
2418
2516
  }
2419
2517
 
2420
2518
  /* Sequences Header */
2421
- if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
2519
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
2520
+ dstSize_tooSmall);
2422
2521
  if (nbSeq < 0x7F)
2423
2522
  *op++ = (BYTE)nbSeq;
2424
2523
  else if (nbSeq < LONGNBSEQ)
@@ -2452,7 +2551,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
2452
2551
  count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
2453
2552
  prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
2454
2553
  workspace, wkspSize);
2455
- if (ZSTD_isError(countSize)) return countSize;
2554
+ FORWARD_IF_ERROR(countSize);
2456
2555
  if (LLtype == set_compressed)
2457
2556
  lastNCount = op;
2458
2557
  op += countSize;
@@ -2474,7 +2573,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
2474
2573
  count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
2475
2574
  prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
2476
2575
  workspace, wkspSize);
2477
- if (ZSTD_isError(countSize)) return countSize;
2576
+ FORWARD_IF_ERROR(countSize);
2478
2577
  if (Offtype == set_compressed)
2479
2578
  lastNCount = op;
2480
2579
  op += countSize;
@@ -2494,7 +2593,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
2494
2593
  count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
2495
2594
  prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
2496
2595
  workspace, wkspSize);
2497
- if (ZSTD_isError(countSize)) return countSize;
2596
+ FORWARD_IF_ERROR(countSize);
2498
2597
  if (MLtype == set_compressed)
2499
2598
  lastNCount = op;
2500
2599
  op += countSize;
@@ -2509,10 +2608,10 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
2509
2608
  CTable_LitLength, llCodeTable,
2510
2609
  sequences, nbSeq,
2511
2610
  longOffsets, bmi2);
2512
- if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
2611
+ FORWARD_IF_ERROR(bitstreamSize);
2513
2612
  op += bitstreamSize;
2514
2613
  /* zstd versions <= 1.3.4 mistakenly report corruption when
2515
- * FSE_readNCount() recieves a buffer < 4 bytes.
2614
+ * FSE_readNCount() receives a buffer < 4 bytes.
2516
2615
  * Fixed by https://github.com/facebook/zstd/pull/1146.
2517
2616
  * This can happen when the last set_compressed table present is 2
2518
2617
  * bytes and the bitstream is only one byte.
@@ -2552,7 +2651,7 @@ ZSTD_compressSequences(seqStore_t* seqStorePtr,
2552
2651
  */
2553
2652
  if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
2554
2653
  return 0; /* block not compressed */
2555
- if (ZSTD_isError(cSize)) return cSize;
2654
+ FORWARD_IF_ERROR(cSize);
2556
2655
 
2557
2656
  /* Check compressibility */
2558
2657
  { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
@@ -2641,7 +2740,10 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
2641
2740
  goto out; /* don't even attempt compression below a certain srcSize */
2642
2741
  }
2643
2742
  ZSTD_resetSeqStore(&(zc->seqStore));
2644
- ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* required for optimal parser to read stats from dictionary */
2743
+ /* required for optimal parser to read stats from dictionary */
2744
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
2745
+ /* tell the optimal parser how we expect to compress literals */
2746
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
2645
2747
 
2646
2748
  /* a gap between an attached dict and the current window is not safe,
2647
2749
  * they must remain adjacent,
@@ -2679,7 +2781,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
2679
2781
  ldmSeqStore.seq = zc->ldmSequences;
2680
2782
  ldmSeqStore.capacity = zc->maxNbLdmSequences;
2681
2783
  /* Updates ldmSeqStore.size */
2682
- CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
2784
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
2683
2785
  &zc->appliedParams.ldmParams,
2684
2786
  src, srcSize));
2685
2787
  /* Updates ldmSeqStore.pos */
@@ -2752,8 +2854,9 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
2752
2854
  ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
2753
2855
  U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
2754
2856
 
2755
- if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
2756
- return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
2857
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
2858
+ dstSize_tooSmall,
2859
+ "not enough space to store compressed block");
2757
2860
  if (remaining < blockSize) blockSize = remaining;
2758
2861
 
2759
2862
  if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
@@ -2774,11 +2877,11 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
2774
2877
  { size_t cSize = ZSTD_compressBlock_internal(cctx,
2775
2878
  op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
2776
2879
  ip, blockSize);
2777
- if (ZSTD_isError(cSize)) return cSize;
2880
+ FORWARD_IF_ERROR(cSize);
2778
2881
 
2779
2882
  if (cSize == 0) { /* block is not compressible */
2780
2883
  cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
2781
- if (ZSTD_isError(cSize)) return cSize;
2884
+ FORWARD_IF_ERROR(cSize);
2782
2885
  } else {
2783
2886
  U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
2784
2887
  MEM_writeLE24(op, cBlockHeader24);
@@ -2811,11 +2914,11 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
2811
2914
  BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
2812
2915
  U32 const fcsCode = params.fParams.contentSizeFlag ?
2813
2916
  (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
2814
- BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
2917
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
2815
2918
  size_t pos=0;
2816
2919
 
2817
2920
  assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
2818
- if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
2921
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
2819
2922
  DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
2820
2923
  !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
2821
2924
 
@@ -2823,7 +2926,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
2823
2926
  MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
2824
2927
  pos = 4;
2825
2928
  }
2826
- op[pos++] = frameHeaderDecriptionByte;
2929
+ op[pos++] = frameHeaderDescriptionByte;
2827
2930
  if (!singleSegment) op[pos++] = windowLogByte;
2828
2931
  switch(dictIDSizeCode)
2829
2932
  {
@@ -2847,11 +2950,11 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
2847
2950
  /* ZSTD_writeLastEmptyBlock() :
2848
2951
  * output an empty Block with end-of-frame mark to complete a frame
2849
2952
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
2850
- * or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
2953
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
2851
2954
  */
2852
2955
  size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2853
2956
  {
2854
- if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
2957
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
2855
2958
  { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
2856
2959
  MEM_writeLE24(dst, cBlockHeader24);
2857
2960
  return ZSTD_blockHeaderSize;
@@ -2860,10 +2963,9 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2860
2963
 
2861
2964
  size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
2862
2965
  {
2863
- if (cctx->stage != ZSTDcs_init)
2864
- return ERROR(stage_wrong);
2865
- if (cctx->appliedParams.ldmParams.enableLdm)
2866
- return ERROR(parameter_unsupported);
2966
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
2967
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
2968
+ parameter_unsupported);
2867
2969
  cctx->externSeqStore.seq = seq;
2868
2970
  cctx->externSeqStore.size = nbSeq;
2869
2971
  cctx->externSeqStore.capacity = nbSeq;
@@ -2882,12 +2984,13 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
2882
2984
 
2883
2985
  DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
2884
2986
  cctx->stage, (unsigned)srcSize);
2885
- if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
2987
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
2988
+ "missing init (ZSTD_compressBegin)");
2886
2989
 
2887
2990
  if (frame && (cctx->stage==ZSTDcs_init)) {
2888
2991
  fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
2889
2992
  cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
2890
- if (ZSTD_isError(fhSize)) return fhSize;
2993
+ FORWARD_IF_ERROR(fhSize);
2891
2994
  dstCapacity -= fhSize;
2892
2995
  dst = (char*)dst + fhSize;
2893
2996
  cctx->stage = ZSTDcs_ongoing;
@@ -2922,17 +3025,18 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
2922
3025
  { size_t const cSize = frame ?
2923
3026
  ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
2924
3027
  ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
2925
- if (ZSTD_isError(cSize)) return cSize;
3028
+ FORWARD_IF_ERROR(cSize);
2926
3029
  cctx->consumedSrcSize += srcSize;
2927
3030
  cctx->producedCSize += (cSize + fhSize);
2928
3031
  assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
2929
3032
  if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
2930
3033
  ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
2931
- if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
2932
- DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
2933
- (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
2934
- return ERROR(srcSize_wrong);
2935
- }
3034
+ RETURN_ERROR_IF(
3035
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
3036
+ srcSize_wrong,
3037
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
3038
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
3039
+ (unsigned)cctx->consumedSrcSize);
2936
3040
  }
2937
3041
  return cSize + fhSize;
2938
3042
  }
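The check above enforces the pledged source size mid-stream: once a size has been declared, feeding more bytes than promised fails with srcSize_wrong (the end-of-frame counterpart lives in ZSTD_compressEnd below). A caller-side sketch using the public API; the helper name is illustrative, and dstCap is assumed to be at least ZSTD_compressBound(srcSize) so that a single call can finish the frame:

    #include <zstd.h>

    /* Declare the exact source size, then compress the whole input in one
     * ZSTD_compressStream2() call ending the frame. Supplying a different
     * amount of data than pledged fails with an srcSize_wrong error. */
    static size_t compressWithPledgedSize(ZSTD_CCtx* cctx,
                                          void* dst, size_t dstCap,
                                          const void* src, size_t srcSize)
    {
        ZSTD_outBuffer out = { dst, dstCap, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t remaining;
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
        ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)srcSize);
        remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;   /* error code */
        return out.pos;                                  /* compressed size */
    }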
@@ -2957,7 +3061,7 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
2957
3061
  size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2958
3062
  {
2959
3063
  size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
2960
- if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
3064
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
2961
3065
 
2962
3066
  return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
2963
3067
  }
@@ -3020,9 +3124,9 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
3020
3124
  NOTE: This behavior is not standard and could be improved in the future. */
3021
3125
  static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
3022
3126
  U32 s;
3023
- if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
3127
+ RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
3024
3128
  for (s = 0; s <= maxSymbolValue; ++s) {
3025
- if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
3129
+ RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
3026
3130
  }
3027
3131
  return 0;
3028
3132
  }
@@ -3060,53 +3164,56 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
3060
3164
 
3061
3165
  { unsigned maxSymbolValue = 255;
3062
3166
  size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
3063
- if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
3064
- if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
3167
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
3168
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
3065
3169
  dictPtr += hufHeaderSize;
3066
3170
  }
3067
3171
 
3068
3172
  { unsigned offcodeLog;
3069
3173
  size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
3070
- if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
3071
- if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
3174
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
3175
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
3072
3176
  /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
3073
3177
  /* fill all offset symbols to avoid garbage at end of table */
3074
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
3075
- offcodeNCount, MaxOff, offcodeLog,
3076
- workspace, HUF_WORKSPACE_SIZE),
3077
- dictionary_corrupted);
3178
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
3179
+ bs->entropy.fse.offcodeCTable,
3180
+ offcodeNCount, MaxOff, offcodeLog,
3181
+ workspace, HUF_WORKSPACE_SIZE)),
3182
+ dictionary_corrupted);
3078
3183
  dictPtr += offcodeHeaderSize;
3079
3184
  }
3080
3185
 
3081
3186
  { short matchlengthNCount[MaxML+1];
3082
3187
  unsigned matchlengthMaxValue = MaxML, matchlengthLog;
3083
3188
  size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
3084
- if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
3085
- if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
3189
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
3190
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
3086
3191
  /* Every match length code must have non-zero probability */
3087
- CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
3088
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
3089
- matchlengthNCount, matchlengthMaxValue, matchlengthLog,
3090
- workspace, HUF_WORKSPACE_SIZE),
3091
- dictionary_corrupted);
3192
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
3193
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
3194
+ bs->entropy.fse.matchlengthCTable,
3195
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
3196
+ workspace, HUF_WORKSPACE_SIZE)),
3197
+ dictionary_corrupted);
3092
3198
  dictPtr += matchlengthHeaderSize;
3093
3199
  }
3094
3200
 
3095
3201
  { short litlengthNCount[MaxLL+1];
3096
3202
  unsigned litlengthMaxValue = MaxLL, litlengthLog;
3097
3203
  size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
3098
- if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
3099
- if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
3204
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
3205
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
3100
3206
  /* Every literal length code must have non-zero probability */
3101
- CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
3102
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
3103
- litlengthNCount, litlengthMaxValue, litlengthLog,
3104
- workspace, HUF_WORKSPACE_SIZE),
3105
- dictionary_corrupted);
3207
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
3208
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
3209
+ bs->entropy.fse.litlengthCTable,
3210
+ litlengthNCount, litlengthMaxValue, litlengthLog,
3211
+ workspace, HUF_WORKSPACE_SIZE)),
3212
+ dictionary_corrupted);
3106
3213
  dictPtr += litlengthHeaderSize;
3107
3214
  }
3108
3215
 
3109
- if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
3216
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
3110
3217
  bs->rep[0] = MEM_readLE32(dictPtr+0);
3111
3218
  bs->rep[1] = MEM_readLE32(dictPtr+4);
3112
3219
  bs->rep[2] = MEM_readLE32(dictPtr+8);
@@ -3119,19 +3226,19 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
3119
3226
  offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
3120
3227
  }
3121
3228
  /* All offset values <= dictContentSize + 128 KB must be representable */
3122
- CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
3229
+ FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
3123
3230
  /* All repCodes must be <= dictContentSize and != 0*/
3124
3231
  { U32 u;
3125
3232
  for (u=0; u<3; u++) {
3126
- if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
3127
- if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
3233
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
3234
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
3128
3235
  } }
3129
3236
 
3130
3237
  bs->entropy.huf.repeatMode = HUF_repeat_valid;
3131
3238
  bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
3132
3239
  bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
3133
3240
  bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
3134
- CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
3241
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
3135
3242
  return dictID;
3136
3243
  }
3137
3244
  }
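ZSTD_loadZstdDictionary() above parses the entropy section of a dictionary produced by the zdict builder: the Huffman literals table, the three FSE tables (offset codes, match lengths, literal lengths), then three 4-byte little-endian repcodes, followed by raw content that seeds the match state. From the user's side such a dictionary is created and consumed through the public API; a hedged sketch, where sample layout and the level are placeholders:

    #include <zstd.h>
    #include <zdict.h>

    /* Train a dictionary from a set of small samples, then use it for
     * one-shot compression. samplesBuffer holds the samples back to back,
     * samplesSizes[i] gives the size of sample i. */
    static size_t trainAndCompress(void* dict, size_t dictCapacity,
                                   const void* samplesBuffer,
                                   const size_t* samplesSizes, unsigned nbSamples,
                                   void* dst, size_t dstCap,
                                   const void* src, size_t srcSize)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dict, dictCapacity,
                                                      samplesBuffer, samplesSizes, nbSamples);
        if (ZDICT_isError(dictSize)) return dictSize;
        {   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
            size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCap, src, srcSize,
                                                         dict, dictSize, 3 /* level */);
            ZSTD_freeCCtx(cctx);
            return cSize;
        }
    }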
@@ -3161,8 +3268,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
3161
3268
  DEBUGLOG(4, "raw content dictionary detected");
3162
3269
  return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
3163
3270
  }
3164
- if (dictContentType == ZSTD_dct_fullDict)
3165
- return ERROR(dictionary_wrong);
3271
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
3166
3272
  assert(0); /* impossible */
3167
3273
  }
3168
3274
 
@@ -3189,13 +3295,13 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
3189
3295
  return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
3190
3296
  }
3191
3297
 
3192
- CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
3298
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
3193
3299
  ZSTDcrp_continue, zbuff) );
3194
3300
  {
3195
3301
  size_t const dictID = ZSTD_compress_insertDictionary(
3196
3302
  cctx->blockState.prevCBlock, &cctx->blockState.matchState,
3197
3303
  &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
3198
- if (ZSTD_isError(dictID)) return dictID;
3304
+ FORWARD_IF_ERROR(dictID);
3199
3305
  assert(dictID <= (size_t)(U32)-1);
3200
3306
  cctx->dictID = (U32)dictID;
3201
3307
  }
@@ -3212,7 +3318,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
3212
3318
  {
3213
3319
  DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
3214
3320
  /* compression parameters verification and optimization */
3215
- CHECK_F( ZSTD_checkCParams(params.cParams) );
3321
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
3216
3322
  return ZSTD_compressBegin_internal(cctx,
3217
3323
  dict, dictSize, dictContentType, dtlm,
3218
3324
  cdict,
@@ -3260,12 +3366,12 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
3260
3366
  size_t fhSize = 0;
3261
3367
 
3262
3368
  DEBUGLOG(4, "ZSTD_writeEpilogue");
3263
- if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */
3369
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
3264
3370
 
3265
3371
  /* special case : empty frame */
3266
3372
  if (cctx->stage == ZSTDcs_init) {
3267
3373
  fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
3268
- if (ZSTD_isError(fhSize)) return fhSize;
3374
+ FORWARD_IF_ERROR(fhSize);
3269
3375
  dstCapacity -= fhSize;
3270
3376
  op += fhSize;
3271
3377
  cctx->stage = ZSTDcs_ongoing;
@@ -3274,7 +3380,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
3274
3380
  if (cctx->stage != ZSTDcs_ending) {
3275
3381
  /* write one last empty block, make it the "last" block */
3276
3382
  U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
3277
- if (dstCapacity<4) return ERROR(dstSize_tooSmall);
3383
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
3278
3384
  MEM_writeLE32(op, cBlockHeader24);
3279
3385
  op += ZSTD_blockHeaderSize;
3280
3386
  dstCapacity -= ZSTD_blockHeaderSize;
@@ -3282,7 +3388,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
3282
3388
 
3283
3389
  if (cctx->appliedParams.fParams.checksumFlag) {
3284
3390
  U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
3285
- if (dstCapacity<4) return ERROR(dstSize_tooSmall);
3391
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
3286
3392
  DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
3287
3393
  MEM_writeLE32(op, checksum);
3288
3394
  op += 4;
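When checksumFlag is set, the frame is terminated with the low 32 bits of an XXH64 digest accumulated over the original source, as written here, and the decompressor verifies it automatically. From the caller's perspective the flag is just a compression parameter; a short sketch (helper name is illustrative, error handling omitted):

    #include <zstd.h>

    /* Request a content checksum on the frame; corruption is then detected
     * at decompression time. */
    static size_t compressWithChecksum(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCap,
                                       const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }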
@@ -3300,18 +3406,20 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
3300
3406
  size_t const cSize = ZSTD_compressContinue_internal(cctx,
3301
3407
  dst, dstCapacity, src, srcSize,
3302
3408
  1 /* frame mode */, 1 /* last chunk */);
3303
- if (ZSTD_isError(cSize)) return cSize;
3409
+ FORWARD_IF_ERROR(cSize);
3304
3410
  endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
3305
- if (ZSTD_isError(endResult)) return endResult;
3411
+ FORWARD_IF_ERROR(endResult);
3306
3412
  assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
3307
3413
  if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
3308
3414
  ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
3309
3415
  DEBUGLOG(4, "end of frame : controlling src size");
3310
- if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
3311
- DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
3312
- (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
3313
- return ERROR(srcSize_wrong);
3314
- } }
3416
+ RETURN_ERROR_IF(
3417
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
3418
+ srcSize_wrong,
3419
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
3420
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
3421
+ (unsigned)cctx->consumedSrcSize);
3422
+ }
3315
3423
  return cSize + endResult;
3316
3424
  }
3317
3425
 
@@ -3339,7 +3447,7 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
3339
3447
  ZSTD_parameters params)
3340
3448
  {
3341
3449
  DEBUGLOG(4, "ZSTD_compress_advanced");
3342
- CHECK_F(ZSTD_checkCParams(params.cParams));
3450
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
3343
3451
  return ZSTD_compress_internal(cctx,
3344
3452
  dst, dstCapacity,
3345
3453
  src, srcSize,
@@ -3356,7 +3464,7 @@ size_t ZSTD_compress_advanced_internal(
3356
3464
  ZSTD_CCtx_params params)
3357
3465
  {
3358
3466
  DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
3359
- CHECK_F( ZSTD_compressBegin_internal(cctx,
3467
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
3360
3468
  dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3361
3469
  params, srcSize, ZSTDb_not_buffered) );
3362
3470
  return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
@@ -3440,7 +3548,7 @@ static size_t ZSTD_initCDict_internal(
3440
3548
  void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
3441
3549
  cdict->dictBuffer = internalBuffer;
3442
3550
  cdict->dictContent = internalBuffer;
3443
- if (!internalBuffer) return ERROR(memory_allocation);
3551
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation);
3444
3552
  memcpy(internalBuffer, dictBuffer, dictSize);
3445
3553
  }
3446
3554
  cdict->dictContentSize = dictSize;
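ZSTD_initCDict_internal() copies the dictionary (unless referenced by pointer) and digests it once into the CDict's tables. The point of a CDict is amortization: when the same dictionary is applied to many small inputs, the digestion cost is paid once at creation instead of on every compression. A hedged usage sketch with the public API (level is a placeholder):

    #include <zstd.h>

    /* Digest the dictionary once, then reuse it across many compressions. */
    static size_t compressWithCDict(const void* dict, size_t dictSize,
                                    void* dst, size_t dstCap,
                                    const void* src, size_t srcSize)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                      src, srcSize, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);   /* in real use, keep the CDict alive and reuse it */
        return cSize;
    }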
@@ -3466,7 +3574,7 @@ static size_t ZSTD_initCDict_internal(
3466
3574
  &cdict->cBlockState, &cdict->matchState, &params,
3467
3575
  cdict->dictContent, cdict->dictContentSize,
3468
3576
  dictContentType, ZSTD_dtlm_full, cdict->workspace);
3469
- if (ZSTD_isError(dictID)) return dictID;
3577
+ FORWARD_IF_ERROR(dictID);
3470
3578
  assert(dictID <= (size_t)(U32)-1);
3471
3579
  cdict->dictID = (U32)dictID;
3472
3580
  }
@@ -3596,7 +3704,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
3596
3704
  ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
3597
3705
  {
3598
3706
  DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
3599
- if (cdict==NULL) return ERROR(dictionary_wrong);
3707
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
3600
3708
  { ZSTD_CCtx_params params = cctx->requestedParams;
3601
3709
  params.cParams = ZSTD_getCParamsFromCDict(cdict);
3602
3710
  /* Increase window log to fit the entire dictionary and source if the
@@ -3632,7 +3740,7 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
3632
3740
  const void* src, size_t srcSize,
3633
3741
  const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
3634
3742
  {
3635
- CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
3743
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
3636
3744
  return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3637
3745
  }
3638
3746
 
@@ -3700,7 +3808,7 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
3700
3808
  assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
3701
3809
  assert(!((dict) && (cdict))); /* either dict or cdict, not both */
3702
3810
 
3703
- CHECK_F( ZSTD_compressBegin_internal(cctx,
3811
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
3704
3812
  dict, dictSize, dictContentType, ZSTD_dtlm_fast,
3705
3813
  cdict,
3706
3814
  params, pledgedSrcSize,
@@ -3718,13 +3826,17 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
3718
3826
 
3719
3827
  /* ZSTD_resetCStream():
3720
3828
  * pledgedSrcSize == 0 means "unknown" */
3721
- size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
3829
+ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
3722
3830
  {
3723
- ZSTD_CCtx_params params = zcs->requestedParams;
3831
+ /* temporary : 0 interpreted as "unknown" during transition period.
3832
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
3833
+ * 0 will be interpreted as "empty" in the future.
3834
+ */
3835
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3724
3836
  DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
3725
- if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
3726
- params.fParams.contentSizeFlag = 1;
3727
- return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
3837
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3838
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3839
+ return 0;
3728
3840
  }
3729
3841
 
3730
3842
  /*! ZSTD_initCStream_internal() :
@@ -3736,32 +3848,18 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
3736
3848
  ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
3737
3849
  {
3738
3850
  DEBUGLOG(4, "ZSTD_initCStream_internal");
3739
- params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
3851
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3852
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3740
3853
  assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
3854
+ zcs->requestedParams = params;
3741
3855
  assert(!((dict) && (cdict))); /* either dict or cdict, not both */
3742
-
3743
- if (dict && dictSize >= 8) {
3744
- DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
3745
- if (zcs->staticSize) { /* static CCtx : never uses malloc */
3746
- /* incompatible with internal cdict creation */
3747
- return ERROR(memory_allocation);
3748
- }
3749
- ZSTD_freeCDict(zcs->cdictLocal);
3750
- zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
3751
- ZSTD_dlm_byCopy, ZSTD_dct_auto,
3752
- params.cParams, zcs->customMem);
3753
- zcs->cdict = zcs->cdictLocal;
3754
- if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
3856
+ if (dict) {
3857
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3755
3858
  } else {
3756
- if (cdict) {
3757
- params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict; it includes windowLog */
3758
- }
3759
- ZSTD_freeCDict(zcs->cdictLocal);
3760
- zcs->cdictLocal = NULL;
3761
- zcs->cdict = cdict;
3859
+ /* Dictionary is cleared if !cdict */
3860
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3762
3861
  }
3763
-
3764
- return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
3862
+ return 0;
3765
3863
  }
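As this hunk shows, the legacy streaming initializers are rewritten in 1.4.0 as thin wrappers over the advanced parameter API: reset the session, set the pledged size and parameters, then attach either a raw dictionary or a CDict. New code can use that API directly; a hedged sketch of the equivalent setup (level and dictionary are placeholders), after which data is fed with ZSTD_compressStream2() as in the loop sketched after the ZSTD_compressStream() hunk further down:

    #include <zstd.h>

    /* Modern equivalent of ZSTD_initCStream_usingDict(): configure the
     * context through the parameter API instead of a dedicated init call. */
    static size_t setupStream(ZSTD_CCtx* cctx,
                              const void* dict, size_t dictSize, int level)
    {
        size_t err;
        err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  /* NULL/0 clears it */
        return err;
    }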
3766
3864
 
3767
3865
  /* ZSTD_initCStream_usingCDict_advanced() :
@@ -3772,22 +3870,20 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
3772
3870
  unsigned long long pledgedSrcSize)
3773
3871
  {
3774
3872
  DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
3775
- if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
3776
- { ZSTD_CCtx_params params = zcs->requestedParams;
3777
- params.cParams = ZSTD_getCParamsFromCDict(cdict);
3778
- params.fParams = fParams;
3779
- return ZSTD_initCStream_internal(zcs,
3780
- NULL, 0, cdict,
3781
- params, pledgedSrcSize);
3782
- }
3873
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3874
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3875
+ zcs->requestedParams.fParams = fParams;
3876
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3877
+ return 0;
3783
3878
  }
3784
3879
 
3785
3880
  /* note : cdict must outlive compression session */
3786
3881
  size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3787
3882
  {
3788
- ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
3789
3883
  DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
3790
- return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); /* note : will check that cdict != NULL */
3884
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3885
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3886
+ return 0;
3791
3887
  }
3792
3888
 
3793
3889
 
@@ -3797,33 +3893,53 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3797
3893
  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
3798
3894
  size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
3799
3895
  const void* dict, size_t dictSize,
3800
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
3896
+ ZSTD_parameters params, unsigned long long pss)
3801
3897
  {
3802
- DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
3803
- (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
3804
- CHECK_F( ZSTD_checkCParams(params.cParams) );
3805
- if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
3898
+ /* for compatibility with older programs relying on this behavior.
3899
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
3900
+ * This line will be removed in the future.
3901
+ */
3902
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3903
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
3904
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3905
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3906
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
3806
3907
  zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
3807
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
3908
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3909
+ return 0;
3808
3910
  }
3809
3911
 
3810
3912
  size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
3811
3913
  {
3812
- ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3813
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
3914
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
3915
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3916
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
3917
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3918
+ return 0;
3814
3919
  }
3815
3920
 
3816
3921
  size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
3817
3922
  {
3818
- U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
3819
- ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3820
- return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
3923
+ /* temporary : 0 interpreted as "unknown" during transition period.
3924
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
3925
+ * 0 will be interpreted as "empty" in the future.
3926
+ */
3927
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3928
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
3929
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3930
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
3931
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
3932
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3933
+ return 0;
3821
3934
  }
3822
3935
 
3823
3936
  size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
3824
3937
  {
3825
3938
  DEBUGLOG(4, "ZSTD_initCStream");
3826
- return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
3939
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3940
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
3941
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
3942
+ return 0;
3827
3943
  }
3828
3944
 
3829
3945
  /*====== Compression ======*/
@@ -3847,10 +3963,10 @@ static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
3847
3963
  * internal function for all *compressStream*() variants
3848
3964
  * non-static, because can be called from zstdmt_compress.c
3849
3965
  * @return : hint size for next input */
3850
- size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3851
- ZSTD_outBuffer* output,
3852
- ZSTD_inBuffer* input,
3853
- ZSTD_EndDirective const flushMode)
3966
+ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3967
+ ZSTD_outBuffer* output,
3968
+ ZSTD_inBuffer* input,
3969
+ ZSTD_EndDirective const flushMode)
3854
3970
  {
3855
3971
  const char* const istart = (const char*)input->src;
3856
3972
  const char* const iend = istart + input->size;
@@ -3873,8 +3989,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3873
3989
  switch(zcs->streamStage)
3874
3990
  {
3875
3991
  case zcss_init:
3876
- /* call ZSTD_initCStream() first ! */
3877
- return ERROR(init_missing);
3992
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
3878
3993
 
3879
3994
  case zcss_load:
3880
3995
  if ( (flushMode == ZSTD_e_end)
@@ -3884,7 +3999,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3884
3999
  size_t const cSize = ZSTD_compressEnd(zcs,
3885
4000
  op, oend-op, ip, iend-ip);
3886
4001
  DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
3887
- if (ZSTD_isError(cSize)) return cSize;
4002
+ FORWARD_IF_ERROR(cSize);
3888
4003
  ip = iend;
3889
4004
  op += cSize;
3890
4005
  zcs->frameEnded = 1;
@@ -3925,7 +4040,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3925
4040
  zcs->inBuff + zcs->inToCompress, iSize) :
3926
4041
  ZSTD_compressContinue(zcs, cDst, oSize,
3927
4042
  zcs->inBuff + zcs->inToCompress, iSize);
3928
- if (ZSTD_isError(cSize)) return cSize;
4043
+ FORWARD_IF_ERROR(cSize);
3929
4044
  zcs->frameEnded = lastBlock;
3930
4045
  /* prepare next block */
3931
4046
  zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
@@ -4001,7 +4116,7 @@ static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
4001
4116
 
4002
4117
  size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
4003
4118
  {
4004
- CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
4119
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
4005
4120
  return ZSTD_nextInputSizeHint_MTorST(zcs);
4006
4121
  }
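ZSTD_compressStream() is now a thin wrapper that forwards to ZSTD_compressStream2() with ZSTD_e_continue and returns the next input-size hint. A typical chunked compression loop against the 1.4.0 API looks roughly like the sketch below (stdio is used only as a stand-in for whatever I/O the application performs):

    #include <stdio.h>
    #include <zstd.h>

    /* Chunked compression over stdio streams, following the
     * ZSTD_compressStream2() contract: with ZSTD_e_end, keep calling until
     * the return value is 0 (frame fully flushed). */
    static size_t streamCompressFile(ZSTD_CCtx* cctx, FILE* fin, FILE* fout)
    {
        char inBuf[1 << 16], outBuf[1 << 16];  /* or ZSTD_CStreamInSize()/OutSize() */
        for (;;) {
            size_t const readSize = fread(inBuf, 1, sizeof(inBuf), fin);
            int const lastChunk = (readSize < sizeof(inBuf));
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer in = { inBuf, readSize, 0 };
            int finished;
            do {
                ZSTD_outBuffer out = { outBuf, sizeof(outBuf), 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, mode);
                if (ZSTD_isError(remaining)) return remaining;
                fwrite(outBuf, 1, out.pos, fout);
                finished = lastChunk ? (remaining == 0) : (in.pos == in.size);
            } while (!finished);
            if (lastChunk) break;
        }
        return 0;
    }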
4007
4122
 
@@ -4013,14 +4128,15 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
4013
4128
  {
4014
4129
  DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
4015
4130
  /* check conditions */
4016
- if (output->pos > output->size) return ERROR(GENERIC);
4017
- if (input->pos > input->size) return ERROR(GENERIC);
4131
+ RETURN_ERROR_IF(output->pos > output->size, GENERIC);
4132
+ RETURN_ERROR_IF(input->pos > input->size, GENERIC);
4018
4133
  assert(cctx!=NULL);
4019
4134
 
4020
4135
  /* transparent initialization stage */
4021
4136
  if (cctx->streamStage == zcss_init) {
4022
4137
  ZSTD_CCtx_params params = cctx->requestedParams;
4023
4138
  ZSTD_prefixDict const prefixDict = cctx->prefixDict;
4139
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
4024
4140
  memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
4025
4141
  assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
4026
4142
  DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
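The new ZSTD_initLocalDict() call reflects a behavior change suggested by this hunk: a dictionary passed through ZSTD_CCtx_loadDictionary() is retained but only digested here, when the next compression actually starts, so that the finally selected parameters can be taken into account. Callers do not need to do anything special; a short hedged sketch for completeness (level and buffers are placeholders):

    #include <zstd.h>

    static size_t compressWithLoadedDict(ZSTD_CCtx* cctx,
                                         const void* dict, size_t dictSize,
                                         void* dst, size_t dstCap,
                                         const void* src, size_t srcSize)
    {
        /* The dictionary content is stored now but digested lazily at the
         * start of the next compression (the transparent init stage above),
         * so the level set afterwards still applies to it. */
        ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }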
@@ -4039,11 +4155,11 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
4039
4155
  DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
4040
4156
  params.nbWorkers);
4041
4157
  cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
4042
- if (cctx->mtctx == NULL) return ERROR(memory_allocation);
4158
+ RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
4043
4159
  }
4044
4160
  /* mt compression */
4045
4161
  DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
4046
- CHECK_F( ZSTDMT_initCStream_internal(
4162
+ FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
4047
4163
  cctx->mtctx,
4048
4164
  prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
4049
4165
  cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4051,7 +4167,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
4051
4167
  cctx->appliedParams.nbWorkers = params.nbWorkers;
4052
4168
  } else
4053
4169
  #endif
4054
- { CHECK_F( ZSTD_resetCStream_internal(cctx,
4170
+ { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
4055
4171
  prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
4056
4172
  cctx->cdict,
4057
4173
  params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4063,20 +4179,30 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
4063
4179
  /* compression stage */
4064
4180
  #ifdef ZSTD_MULTITHREAD
4065
4181
  if (cctx->appliedParams.nbWorkers > 0) {
4182
+ int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
4183
+ size_t flushMin;
4184
+ assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
4066
4185
  if (cctx->cParamsChanged) {
4067
4186
  ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
4068
4187
  cctx->cParamsChanged = 0;
4069
4188
  }
4070
- { size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
4189
+ do {
4190
+ flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
4071
4191
  if ( ZSTD_isError(flushMin)
4072
4192
  || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
4073
4193
  ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
4074
4194
  }
4075
- DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
4076
- return flushMin;
4077
- } }
4195
+ FORWARD_IF_ERROR(flushMin);
4196
+ } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
4197
+ DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
4198
+ /* Either we don't require maximum forward progress, we've finished the
4199
+ * flush, or we are out of output space.
4200
+ */
4201
+ assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
4202
+ return flushMin;
4203
+ }
4078
4204
  #endif
4079
- CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
4205
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
4080
4206
  DEBUGLOG(5, "completed ZSTD_compressStream2");
4081
4207
  return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
4082
4208
  }
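With nbWorkers > 0, the new do/while loop above keeps driving ZSTDMT until a flush or end directive has either completed or filled the output buffer, so a single ZSTD_compressStream2() call now makes maximal forward progress. The caller-side contract is unchanged: enable workers as a parameter and keep calling until the return value reaches 0. A hedged sketch; the worker count is a placeholder, dstCap is assumed large enough to hold the whole frame (otherwise drain the output buffer between calls), and builds without ZSTD_MULTITHREAD will refuse or ignore the parameter:

    #include <zstd.h>

    static size_t compressAllMT(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCap,
                                const void* src, size_t srcSize)
    {
        ZSTD_outBuffer out = { dst, dstCap, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t remaining;
        /* must be configured before the frame starts */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
        do {
            remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;
        } while (remaining != 0);     /* 0 == frame complete and flushed */
        return out.pos;
    }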
@@ -4107,10 +4233,10 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
4107
4233
  dst, dstCapacity, &oPos,
4108
4234
  src, srcSize, &iPos,
4109
4235
  ZSTD_e_end);
4110
- if (ZSTD_isError(result)) return result;
4236
+ FORWARD_IF_ERROR(result);
4111
4237
  if (result != 0) { /* compression not completed, due to lack of output space */
4112
4238
  assert(oPos == dstCapacity);
4113
- return ERROR(dstSize_tooSmall);
4239
+ RETURN_ERROR(dstSize_tooSmall);
4114
4240
  }
4115
4241
  assert(iPos == srcSize); /* all input is expected consumed */
4116
4242
  return oPos;
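ZSTD_compress2() is the one-shot entry point for the new parameter API: it runs a full ZSTD_e_end pass through the streaming machinery shown above and insists that everything fits, returning dstSize_tooSmall otherwise. Typical usage sizes the destination with ZSTD_compressBound(); names below are illustrative and allocation checks are omitted for brevity:

    #include <stdlib.h>
    #include <zstd.h>

    /* Compress src into a freshly allocated buffer at level 9. */
    static size_t oneShotCompress(const void* src, size_t srcSize,
                                  void** dstOut, size_t* dstSizeOut)
    {
        size_t const bound = ZSTD_compressBound(srcSize);
        void* const dst = malloc(bound);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 9);
        cSize = ZSTD_compress2(cctx, dst, bound, src, srcSize);
        ZSTD_freeCCtx(cctx);
        if (ZSTD_isError(cSize)) { free(dst); return cSize; }
        *dstOut = dst;
        *dstSizeOut = cSize;
        return cSize;
    }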
@@ -4132,7 +4258,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
4132
4258
  {
4133
4259
  ZSTD_inBuffer input = { NULL, 0, 0 };
4134
4260
  size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
4135
- CHECK_F( remainingToFlush );
4261
+ FORWARD_IF_ERROR( remainingToFlush );
4136
4262
  if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
4137
4263
  /* single thread mode : attempt to calculate remaining to flush more precisely */
4138
4264
  { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
@@ -4151,7 +4277,7 @@ int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
4151
4277
  int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
4152
4278
 
4153
4279
  static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
4154
- { /* "default" - guarantees a monotonically increasing memory budget */
4280
+ { /* "default" - for any srcSize > 256 KB */
4155
4281
  /* W, C, H, S, L, TL, strat */
4156
4282
  { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
4157
4283
  { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
@@ -4258,13 +4384,13 @@ static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEV
4258
4384
  };
4259
4385
 
4260
4386
  /*! ZSTD_getCParams() :
4261
- * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
4262
- * Size values are optional, provide 0 if not known or unused */
4387
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
4388
+ * Size values are optional, provide 0 if not known or unused */
4263
4389
  ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
4264
4390
  {
4265
4391
  size_t const addedSize = srcSizeHint ? 0 : 500;
4266
- U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
4267
- U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
4392
+ U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN; /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
4393
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
4268
4394
  int row = compressionLevel;
4269
4395
  DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
4270
4396
  if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
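tableID selects one of the four built-in parameter tables from the estimated source size plus dictionary size: index 0 for anything above 256 KB or for unknown sizes, then 1, 2 and 3 as rSize drops under 256 KB, 128 KB and 16 KB, so the same compression level maps to smaller window and search parameters for small inputs. ZSTD_getCParams() exposes the result (it lives in the static-linking-only section of zstd.h); a small illustration:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    /* Same level, different srcSizeHint: expect a smaller windowLog for the
     * small-input hint, per the tableID selection above. */
    int main(void)
    {
        ZSTD_compressionParameters const large = ZSTD_getCParams(3, 0 /* unknown */, 0);
        ZSTD_compressionParameters const small = ZSTD_getCParams(3, 10 * 1024, 0);
        printf("level 3: windowLog %u (unknown size) vs %u (10 KB hint)\n",
               large.windowLog, small.windowLog);
        return 0;
    }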
@@ -4272,13 +4398,14 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
4272
4398
  if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
4273
4399
  { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
4274
4400
  if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
4275
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
4401
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); /* refine parameters based on srcSize & dictSize */
4276
4402
  }
4277
4403
  }
4278
4404
 
4279
4405
  /*! ZSTD_getParams() :
4280
- * same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
4281
- * All fields of `ZSTD_frameParameters` are set to default (0) */
4406
+ * same idea as ZSTD_getCParams()
4407
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
4408
+ * Fields of `ZSTD_frameParameters` are set to default values */
4282
4409
  ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
4283
4410
  ZSTD_parameters params;
4284
4411
  ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);