extlz4 0.3.3 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/Rakefile +43 -3
  4. data/contrib/lz4/CODING_STYLE +57 -0
  5. data/contrib/lz4/LICENSE +3 -2
  6. data/contrib/lz4/Makefile.inc +56 -30
  7. data/contrib/lz4/NEWS +46 -0
  8. data/contrib/lz4/README.md +17 -6
  9. data/contrib/lz4/SECURITY.md +17 -0
  10. data/contrib/lz4/build/README.md +4 -15
  11. data/contrib/lz4/build/VS2022/_build.bat +39 -0
  12. data/contrib/lz4/build/VS2022/_setup.bat +35 -0
  13. data/contrib/lz4/build/VS2022/_test.bat +38 -0
  14. data/contrib/lz4/build/VS2022/build-and-test-win32-debug.bat +26 -0
  15. data/contrib/lz4/build/VS2022/build-and-test-win32-release.bat +26 -0
  16. data/contrib/lz4/build/VS2022/build-and-test-x64-debug.bat +26 -0
  17. data/contrib/lz4/build/VS2022/build-and-test-x64-release.bat +26 -0
  18. data/contrib/lz4/build/{VS2017 → VS2022}/datagen/datagen.vcxproj +11 -7
  19. data/contrib/lz4/build/{VS2017 → VS2022}/frametest/frametest.vcxproj +4 -4
  20. data/contrib/lz4/build/{VS2017 → VS2022}/fullbench/fullbench.vcxproj +4 -4
  21. data/contrib/lz4/build/{VS2017 → VS2022}/fullbench-dll/fullbench-dll.vcxproj +4 -4
  22. data/contrib/lz4/build/{VS2017 → VS2022}/fuzzer/fuzzer.vcxproj +4 -4
  23. data/contrib/lz4/build/{VS2017 → VS2022}/liblz4/liblz4.vcxproj +4 -4
  24. data/contrib/lz4/build/{VS2010 → VS2022}/liblz4-dll/liblz4-dll.rc +1 -1
  25. data/contrib/lz4/build/{VS2017 → VS2022}/liblz4-dll/liblz4-dll.vcxproj +4 -4
  26. data/contrib/lz4/build/{VS2010 → VS2022}/lz4/lz4.rc +1 -1
  27. data/contrib/lz4/build/{VS2017 → VS2022}/lz4/lz4.vcxproj +33 -8
  28. data/contrib/lz4/build/{VS2017 → VS2022}/lz4.sln +5 -2
  29. data/contrib/lz4/build/cmake/CMakeLists.txt +133 -100
  30. data/contrib/lz4/build/cmake/lz4Config.cmake.in +2 -0
  31. data/contrib/lz4/build/meson/GetLz4LibraryVersion.py +39 -0
  32. data/contrib/lz4/build/meson/README.md +34 -0
  33. data/contrib/lz4/build/meson/meson/contrib/gen_manual/meson.build +42 -0
  34. data/contrib/lz4/build/meson/meson/contrib/meson.build +11 -0
  35. data/contrib/lz4/build/meson/meson/examples/meson.build +32 -0
  36. data/contrib/lz4/build/meson/meson/lib/meson.build +87 -0
  37. data/contrib/lz4/build/meson/meson/meson.build +135 -0
  38. data/contrib/lz4/build/meson/meson/ossfuzz/meson.build +35 -0
  39. data/contrib/lz4/build/meson/meson/programs/meson.build +91 -0
  40. data/contrib/lz4/build/meson/meson/tests/meson.build +162 -0
  41. data/contrib/lz4/build/meson/meson.build +31 -0
  42. data/contrib/lz4/build/meson/meson_options.txt +44 -0
  43. data/contrib/lz4/build/visual/README.md +5 -0
  44. data/contrib/lz4/build/visual/generate_solution.cmd +55 -0
  45. data/contrib/lz4/build/visual/generate_vs2015.cmd +3 -0
  46. data/contrib/lz4/build/visual/generate_vs2017.cmd +3 -0
  47. data/contrib/lz4/build/visual/generate_vs2019.cmd +3 -0
  48. data/contrib/lz4/build/visual/generate_vs2022.cmd +3 -0
  49. data/contrib/lz4/lib/LICENSE +1 -1
  50. data/contrib/lz4/lib/README.md +69 -13
  51. data/contrib/lz4/lib/liblz4-dll.rc.in +1 -1
  52. data/contrib/lz4/lib/liblz4.pc.in +3 -3
  53. data/contrib/lz4/lib/lz4.c +608 -274
  54. data/contrib/lz4/lib/lz4.h +212 -102
  55. data/contrib/lz4/lib/lz4file.c +341 -0
  56. data/contrib/lz4/lib/lz4file.h +93 -0
  57. data/contrib/lz4/lib/lz4frame.c +545 -308
  58. data/contrib/lz4/lib/lz4frame.h +252 -124
  59. data/contrib/lz4/lib/lz4frame_static.h +1 -1
  60. data/contrib/lz4/lib/lz4hc.c +1038 -461
  61. data/contrib/lz4/lib/lz4hc.h +57 -56
  62. data/contrib/lz4/lib/xxhash.c +21 -21
  63. data/contrib/lz4/ossfuzz/Makefile +1 -0
  64. data/contrib/lz4/ossfuzz/decompress_fuzzer.c +18 -2
  65. data/contrib/lz4/ossfuzz/fuzz_helpers.h +4 -3
  66. data/contrib/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c +134 -0
  67. data/contrib/lz4/ossfuzz/round_trip_fuzzer.c +66 -6
  68. data/ext/blockapi.c +19 -19
  69. data/ext/extlz4.h +12 -0
  70. data/ext/frameapi.c +26 -26
  71. data/ext/hashargs.c +7 -1
  72. metadata +47 -30
  73. data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +0 -169
  74. data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +0 -176
  75. data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +0 -176
  76. data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +0 -180
  77. data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +0 -173
  78. data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +0 -175
  79. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +0 -179
  80. data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +0 -189
  81. data/contrib/lz4/build/VS2010/lz4.sln +0 -98
  82. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +0 -51
  83. data/contrib/lz4/build/VS2017/lz4/lz4.rc +0 -51
  84. data/contrib/lz4/tmp +0 -0
  85. data/contrib/lz4/tmpsparse +0 -0
data/contrib/lz4/lib/lz4frame.c
@@ -44,8 +44,9 @@
  /*-************************************
  * Compiler Options
  **************************************/
+ #include <limits.h>
  #ifdef _MSC_VER /* Visual Studio */
- # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  #endif


@@ -54,14 +55,27 @@
  **************************************/
  /*
  * LZ4F_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ * Control how LZ4F_compressFrame allocates the Compression State,
+ * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
  */
  #ifndef LZ4F_HEAPMODE
  # define LZ4F_HEAPMODE 0
  #endif


+ /*-************************************
+ * Library declarations
+ **************************************/
+ #define LZ4F_STATIC_LINKING_ONLY
+ #include "lz4frame.h"
+ #define LZ4_STATIC_LINKING_ONLY
+ #include "lz4.h"
+ #define LZ4_HC_STATIC_LINKING_ONLY
+ #include "lz4hc.h"
+ #define XXH_STATIC_LINKING_ONLY
+ #include "xxhash.h"
+
+
  /*-************************************
  * Memory routines
  **************************************/
@@ -70,7 +84,13 @@
  * malloc(), calloc() and free()
  * towards another library or solution of their choice
  * by modifying below section.
- */
+ **/
+
+ #include <string.h> /* memset, memcpy, memmove */
+ #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+ # define MEM_INIT(p,v,s) memset((p),(v),(s))
+ #endif
+
  #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
  # include <stdlib.h> /* malloc, calloc, free */
  # define ALLOC(s) malloc(s)
@@ -78,23 +98,43 @@
  # define FREEMEM(p) free(p)
  #endif

- #include <string.h> /* memset, memcpy, memmove */
- #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
- # define MEM_INIT(p,v,s) memset((p),(v),(s))
- #endif
+ static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
+ {
+ /* custom calloc defined : use it */
+ if (cmem.customCalloc != NULL) {
+ return cmem.customCalloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s calloc() */
+ if (cmem.customAlloc == NULL) {
+ return ALLOC_AND_ZERO(s);
+ }
+ /* only custom alloc defined : use it, and combine it with memset() */
+ { void* const p = cmem.customAlloc(cmem.opaqueState, s);
+ if (p != NULL) MEM_INIT(p, 0, s);
+ return p;
+ } }

+ static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
+ {
+ /* custom malloc defined : use it */
+ if (cmem.customAlloc != NULL) {
+ return cmem.customAlloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s malloc() */
+ return ALLOC(s);
+ }

- /*-************************************
- * Library declarations
- **************************************/
- #define LZ4F_STATIC_LINKING_ONLY
- #include "lz4frame.h"
- #define LZ4_STATIC_LINKING_ONLY
- #include "lz4.h"
- #define LZ4_HC_STATIC_LINKING_ONLY
- #include "lz4hc.h"
- #define XXH_STATIC_LINKING_ONLY
- #include "xxhash.h"
+ static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
+ {
+ if (p == NULL) return;
+ if (cmem.customFree != NULL) {
+ /* custom allocation defined : use it */
+ cmem.customFree(cmem.opaqueState, p);
+ return;
+ }
+ /* nothing defined : use default <stdlib.h>'s free() */
+ FREEMEM(p);
+ }
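The three wrappers above (LZ4F_calloc, LZ4F_malloc, LZ4F_free) route every lz4frame allocation through an LZ4F_CustomMem descriptor and fall back to the <stdlib.h> defaults when its callbacks are left NULL. A minimal caller-side sketch, assuming the LZ4F_CustomMem fields used above (customAlloc, customCalloc, customFree, opaqueState) and the LZ4F_createCompressionContext_advanced() constructor added further down in this diff are exposed by the bundled lz4frame.h; the tracking counter and wrapper names are illustrative only:

#define LZ4F_STATIC_LINKING_ONLY  /* assumption: custom-memory declarations sit in the static-linking-only section */
#include "lz4frame.h"
#include <stdlib.h>

/* hypothetical tracking allocator : counts live allocations through opaqueState */
static void* my_alloc(void* opaqueState, size_t size)
{
    (*(size_t*)opaqueState)++;
    return malloc(size);
}

static void my_free(void* opaqueState, void* address)
{
    if (address != NULL) (*(size_t*)opaqueState)--;
    free(address);
}

static size_t g_liveAllocs = 0;

LZ4F_cctx* create_tracked_cctx(void)
{
    LZ4F_CustomMem cmem;
    cmem.customAlloc  = my_alloc;
    cmem.customCalloc = NULL;          /* NULL => LZ4F_calloc() above falls back to customAlloc + memset */
    cmem.customFree   = my_free;
    cmem.opaqueState  = &g_liveAllocs;
    return LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
}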
 
 
  /*-************************************
@@ -115,7 +155,7 @@
  static int g_debuglog_enable = 1;
  # define DEBUGLOG(l, ...) { \
  if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \
  fprintf(stderr, __VA_ARGS__); \
  fprintf(stderr, " \n"); \
  } }
@@ -143,14 +183,14 @@ static int g_debuglog_enable = 1;
  #endif


- /* unoptimized version; solves endianess & alignment issues */
+ /* unoptimized version; solves endianness & alignment issues */
  static U32 LZ4F_readLE32 (const void* src)
  {
  const BYTE* const srcPtr = (const BYTE*)src;
  U32 value32 = srcPtr[0];
- value32 += ((U32)srcPtr[1])<< 8;
- value32 += ((U32)srcPtr[2])<<16;
- value32 += ((U32)srcPtr[3])<<24;
+ value32 |= ((U32)srcPtr[1])<< 8;
+ value32 |= ((U32)srcPtr[2])<<16;
+ value32 |= ((U32)srcPtr[3])<<24;
  return value32;
  }

@@ -167,13 +207,13 @@ static U64 LZ4F_readLE64 (const void* src)
  {
  const BYTE* const srcPtr = (const BYTE*)src;
  U64 value64 = srcPtr[0];
- value64 += ((U64)srcPtr[1]<<8);
- value64 += ((U64)srcPtr[2]<<16);
- value64 += ((U64)srcPtr[3]<<24);
- value64 += ((U64)srcPtr[4]<<32);
- value64 += ((U64)srcPtr[5]<<40);
- value64 += ((U64)srcPtr[6]<<48);
- value64 += ((U64)srcPtr[7]<<56);
+ value64 |= ((U64)srcPtr[1]<<8);
+ value64 |= ((U64)srcPtr[2]<<16);
+ value64 |= ((U64)srcPtr[3]<<24);
+ value64 |= ((U64)srcPtr[4]<<32);
+ value64 |= ((U64)srcPtr[5]<<40);
+ value64 |= ((U64)srcPtr[6]<<48);
+ value64 |= ((U64)srcPtr[7]<<56);
  return value64;
  }

@@ -206,8 +246,6 @@ static void LZ4F_writeLE64 (void* dst, U64 value64)
  #define _4BITS 0x0F
  #define _8BITS 0xFF

- #define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
- #define LZ4F_MAGICNUMBER 0x184D2204U
  #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
  #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB

@@ -220,22 +258,28 @@ static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checks
  /*-************************************
  * Structures and local types
  **************************************/
+
+ typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
+ typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
+
  typedef struct LZ4F_cctx_s
  {
+ LZ4F_CustomMem cmem;
  LZ4F_preferences_t prefs;
  U32 version;
- U32 cStage;
+ U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */
  const LZ4F_CDict* cdict;
  size_t maxBlockSize;
  size_t maxBufferSize;
- BYTE* tmpBuff;
- BYTE* tmpIn;
- size_t tmpInSize;
+ BYTE* tmpBuff; /* internal buffer, for streaming */
+ BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */
+ size_t tmpInSize; /* amount of data to compress after tmpIn */
  U64 totalInSize;
  XXH32_state_t xxh;
  void* lz4CtxPtr;
  U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
- U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ LZ4F_BlockCompressMode_e blockCompressMode;
  } LZ4F_cctx_t;


@@ -264,27 +308,38 @@ LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
  return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
  }

- static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
+ static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
  {
  /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
  LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
  return (LZ4F_errorCode_t)-(ptrdiff_t)code;
  }

+ #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
+
+ #define RETURN_ERROR_IF(c,e) do { \
+ if (c) { \
+ DEBUGLOG(3, "Error: " #c); \
+ RETURN_ERROR(e); \
+ } \
+ } while (0)
+
+ #define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
+
  unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }

  int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }

- size_t LZ4F_getBlockSize(unsigned blockSizeID)
+ size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
  {
  static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };

  if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
  if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
- return err0r(LZ4F_ERROR_maxBlockSize_invalid);
- blockSizeID -= LZ4F_max64KB;
- return blockSizes[blockSizeID];
- }
+ RETURN_ERROR(maxBlockSize_invalid);
+ { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
+ return blockSizes[blockSizeIdx];
+ } }

  /*-************************************
  * Private functions
@@ -382,6 +437,7 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
  BYTE* dstPtr = dstStart;
  BYTE* const dstEnd = dstStart + dstCapacity;

+ DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
  if (preferencesPtr!=NULL)
  prefs = *preferencesPtr;
  else
@@ -397,21 +453,20 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
  MEM_INIT(&options, 0, sizeof(options));
  options.stableSrc = 1;

- if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);

  { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
- if (LZ4F_isError(headerSize)) return headerSize;
+ FORWARD_IF_ERROR(headerSize);
  dstPtr += headerSize; /* header size */ }

  assert(dstEnd >= dstPtr);
  { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
- if (LZ4F_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
  dstPtr += cSize; }

  assert(dstEnd >= dstPtr);
  { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
- if (LZ4F_isError(tailSize)) return tailSize;
+ FORWARD_IF_ERROR(tailSize);
  dstPtr += tailSize; }

  assert(dstEnd >= dstStart);
@@ -432,27 +487,26 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
  {
  size_t result;
  #if (LZ4F_HEAPMODE)
- LZ4F_cctx_t *cctxPtr;
+ LZ4F_cctx_t* cctxPtr;
  result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
- if (LZ4F_isError(result)) return result;
+ FORWARD_IF_ERROR(result);
  #else
  LZ4F_cctx_t cctx;
  LZ4_stream_t lz4ctx;
- LZ4F_cctx_t *cctxPtr = &cctx;
+ LZ4F_cctx_t* const cctxPtr = &cctx;

- DEBUGLOG(4, "LZ4F_compressFrame");
  MEM_INIT(&cctx, 0, sizeof(cctx));
  cctx.version = LZ4F_VERSION;
  cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
- if (preferencesPtr == NULL ||
- preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN)
- {
+ if ( preferencesPtr == NULL
+ || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
  LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
  cctxPtr->lz4CtxPtr = &lz4ctx;
  cctxPtr->lz4CtxAlloc = 1;
- cctxPtr->lz4CtxState = 1;
+ cctxPtr->lz4CtxType = ctxFast;
  }
  #endif
+ DEBUGLOG(4, "LZ4F_compressFrame");

  result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
  srcBuffer, srcSize,
@@ -461,10 +515,9 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
  #if (LZ4F_HEAPMODE)
  LZ4F_freeCompressionContext(cctxPtr);
  #else
- if (preferencesPtr != NULL &&
- preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN)
- {
- FREEMEM(cctxPtr->lz4CtxPtr);
+ if ( preferencesPtr != NULL
+ && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
+ LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
  }
  #endif
  return result;
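Both hunks above only change how LZ4F_compressFrame() reports errors and releases its context; from the caller's point of view nothing moves, and errors still travel inside the returned size_t. A short reminder of the caller-side pattern using the long-standing public helpers LZ4F_isError() and LZ4F_getErrorName() (a sketch, not part of this diff; buffer sizing via LZ4F_compressFrameBound() is the usual convention):

#include <stdio.h>
#include <stdlib.h>
#include "lz4frame.h"

/* one-shot compression of src into a freshly allocated buffer; returns compressed size, or 0 on error */
static size_t compress_frame(const void* src, size_t srcSize, void** dstOut)
{
    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);  /* NULL => default preferences */
    void* const dst = malloc(bound);
    if (dst == NULL) return 0;

    {   size_t const r = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(r)) {        /* error codes are encoded in the size_t result */
            fprintf(stderr, "lz4frame error: %s\n", LZ4F_getErrorName(r));
            free(dst);
            return 0;
        }
        *dstOut = dst;
        return r;
    }
}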
@@ -476,48 +529,62 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
  *****************************************************/

  struct LZ4F_CDict_s {
+ LZ4F_CustomMem cmem;
  void* dictContent;
  LZ4_stream_t* fastCtx;
  LZ4_streamHC_t* HCCtx;
  }; /* typedef'd to LZ4F_CDict within lz4frame_static.h */

- /*! LZ4F_createCDict() :
- * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
- * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within CDict
- * @return : digested dictionary for compression, or NULL if failed */
- LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
+ LZ4F_CDict*
+ LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
  {
  const char* dictStart = (const char*)dictBuffer;
- LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
- DEBUGLOG(4, "LZ4F_createCDict");
+ LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
+ DEBUGLOG(4, "LZ4F_createCDict_advanced");
  if (!cdict) return NULL;
+ cdict->cmem = cmem;
  if (dictSize > 64 KB) {
  dictStart += dictSize - 64 KB;
  dictSize = 64 KB;
  }
- cdict->dictContent = ALLOC(dictSize);
- cdict->fastCtx = LZ4_createStream();
- cdict->HCCtx = LZ4_createStreamHC();
+ cdict->dictContent = LZ4F_malloc(dictSize, cmem);
+ /* note: using @cmem to allocate => can't use default create */
+ cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
+ cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
  if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
  LZ4F_freeCDict(cdict);
  return NULL;
  }
  memcpy(cdict->dictContent, dictStart, dictSize);
- LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
+ LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
+ /* note: we don't know at this point which compression level is going to be used
+ * as a consequence, HCCtx is created for the more common HC mode */
  LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
  LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
  return cdict;
  }

+ /*! LZ4F_createCDict() :
+ * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
+ * @return : digested dictionary for compression, or NULL if failed */
+ LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
+ {
+ DEBUGLOG(4, "LZ4F_createCDict");
+ return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
+ }
+
  void LZ4F_freeCDict(LZ4F_CDict* cdict)
  {
  if (cdict==NULL) return; /* support free on NULL */
- FREEMEM(cdict->dictContent);
- LZ4_freeStream(cdict->fastCtx);
- LZ4_freeStreamHC(cdict->HCCtx);
- FREEMEM(cdict);
+ LZ4F_free(cdict->dictContent, cdict->cmem);
+ LZ4F_free(cdict->fastCtx, cdict->cmem);
+ LZ4F_free(cdict->HCCtx, cdict->cmem);
+ LZ4F_free(cdict, cdict->cmem);
  }


@@ -525,6 +592,20 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
  * Advanced compression functions
  ***********************************/

+ LZ4F_cctx*
+ LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+ {
+ LZ4F_cctx* const cctxPtr =
+ (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
+ if (cctxPtr==NULL) return NULL;
+
+ cctxPtr->cmem = customMem;
+ cctxPtr->version = version;
+ cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */
+
+ return cctxPtr;
+ }
+
  /*! LZ4F_createCompressionContext() :
  * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
  * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
@@ -532,29 +613,26 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
  * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
  * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
  * Object can release its memory using LZ4F_freeCompressionContext();
- */
- LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
+ **/
+ LZ4F_errorCode_t
+ LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
  {
- LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
- if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
-
- cctxPtr->version = version;
- cctxPtr->cStage = 0; /* Next stage : init stream */
-
- *LZ4F_compressionContextPtr = cctxPtr;
+ assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
+ /* in case it nonetheless happen in production */
+ RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);

+ *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
+ RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
  return LZ4F_OK_NoError;
  }

-
  LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
  {
  if (cctxPtr != NULL) { /* support free on NULL */
- FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
- FREEMEM(cctxPtr->tmpBuff);
- FREEMEM(cctxPtr);
+ LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+ LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
+ LZ4F_free(cctxPtr, cctxPtr->cmem);
  }
-
  return LZ4F_OK_NoError;
  }

@@ -572,7 +650,7 @@ static void LZ4F_initStream(void* ctx,
  int level,
  LZ4F_blockMode_t blockMode) {
  if (level < LZ4HC_CLEVEL_MIN) {
- if (cdict != NULL || blockMode == LZ4F_blockLinked) {
+ if (cdict || blockMode == LZ4F_blockLinked) {
  /* In these cases, we will call LZ4_compress_fast_continue(),
  * which needs an already reset context. Otherwise, we'll call a
  * one-shot API. The non-continued APIs internally perform their own
@@ -580,139 +658,204 @@ static void LZ4F_initStream(void* ctx,
  * tableType they need the context to be in. So in that case this
  * would be misguided / wasted work. */
  LZ4_resetStream_fast((LZ4_stream_t*)ctx);
+ if (cdict)
+ LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
  }
- LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
+ /* In these cases, we'll call a one-shot API.
+ * The non-continued APIs internally perform their own resets
+ * at the beginning of their calls, where they know
+ * which tableType they need the context to be in.
+ * Therefore, a reset here would be wasted work. */
  } else {
  LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
- LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
+ if (cdict)
+ LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
  }
  }

+ static int ctxTypeID_to_size(int ctxTypeID) {
+ switch(ctxTypeID) {
+ case 1:
+ return LZ4_sizeofState();
+ case 2:
+ return LZ4_sizeofStateHC();
+ default:
+ return 0;
+ }
+ }

- /*! LZ4F_compressBegin_usingCDict() :
- * init streaming compression and writes frame header into dstBuffer.
- * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
+ /* LZ4F_compressBegin_internal()
+ * Note: only accepts @cdict _or_ @dictBuffer as non NULL.
  */
- size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
+ size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
  void* dstBuffer, size_t dstCapacity,
+ const void* dictBuffer, size_t dictSize,
  const LZ4F_CDict* cdict,
  const LZ4F_preferences_t* preferencesPtr)
  {
- LZ4F_preferences_t prefNull;
+ LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
  BYTE* const dstStart = (BYTE*)dstBuffer;
  BYTE* dstPtr = dstStart;
- BYTE* headerStart;

- if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- MEM_INIT(&prefNull, 0, sizeof(prefNull));
+ RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
  if (preferencesPtr == NULL) preferencesPtr = &prefNull;
- cctxPtr->prefs = *preferencesPtr;
-
- /* Ctx Management */
- { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
- if (cctxPtr->lz4CtxAlloc < ctxTypeID) {
- FREEMEM(cctxPtr->lz4CtxPtr);
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
- cctxPtr->lz4CtxPtr = LZ4_createStream();
+ cctx->prefs = *preferencesPtr;
+
+ /* cctx Management */
+ { U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
+ int requiredSize = ctxTypeID_to_size(ctxTypeID);
+ int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
+ if (allocatedSize < requiredSize) {
+ /* not enough space allocated */
+ LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
+ if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ /* must take ownership of memory allocation,
+ * in order to respect custom allocator contract */
+ cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
+ if (cctx->lz4CtxPtr)
+ LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
  } else {
- cctxPtr->lz4CtxPtr = LZ4_createStreamHC();
+ cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
+ if (cctx->lz4CtxPtr)
+ LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
  }
- if (cctxPtr->lz4CtxPtr == NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
- cctxPtr->lz4CtxAlloc = ctxTypeID;
- cctxPtr->lz4CtxState = ctxTypeID;
- } else if (cctxPtr->lz4CtxState != ctxTypeID) {
- /* otherwise, a sufficient buffer is allocated, but we need to
- * reset it to the correct context type */
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
- LZ4_initStream((LZ4_stream_t *) cctxPtr->lz4CtxPtr, sizeof (LZ4_stream_t));
+ RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
+ cctx->lz4CtxAlloc = ctxTypeID;
+ cctx->lz4CtxType = ctxTypeID;
+ } else if (cctx->lz4CtxType != ctxTypeID) {
+ /* otherwise, a sufficient buffer is already allocated,
+ * but we need to reset it to the correct context type */
+ if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
  } else {
- LZ4_initStreamHC((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
- LZ4_setCompressionLevel((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
+ LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
  }
- cctxPtr->lz4CtxState = ctxTypeID;
- }
- }
+ cctx->lz4CtxType = ctxTypeID;
+ } }

  /* Buffer Management */
- if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
- cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
- cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
+ if (cctx->prefs.frameInfo.blockSizeID == 0)
+ cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);

  { size_t const requiredBuffSize = preferencesPtr->autoFlush ?
- ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
- cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
-
- if (cctxPtr->maxBufferSize < requiredBuffSize) {
- cctxPtr->maxBufferSize = 0;
- FREEMEM(cctxPtr->tmpBuff);
- cctxPtr->tmpBuff = (BYTE*)ALLOC_AND_ZERO(requiredBuffSize);
- if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
- cctxPtr->maxBufferSize = requiredBuffSize;
+ ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
+ cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
+
+ if (cctx->maxBufferSize < requiredBuffSize) {
+ cctx->maxBufferSize = 0;
+ LZ4F_free(cctx->tmpBuff, cctx->cmem);
+ cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
+ RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
+ cctx->maxBufferSize = requiredBuffSize;
  } }
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- cctxPtr->tmpInSize = 0;
- (void)XXH32_reset(&(cctxPtr->xxh), 0);
+ cctx->tmpIn = cctx->tmpBuff;
+ cctx->tmpInSize = 0;
+ (void)XXH32_reset(&(cctx->xxh), 0);

  /* context init */
- cctxPtr->cdict = cdict;
- if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
+ cctx->cdict = cdict;
+ if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
  /* frame init only for blockLinked : blockIndependent will be init at each block */
- LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
+ LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
  }
  if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
- LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+ LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
  }
+ if (dictBuffer) {
+ assert(cdict == NULL);
+ RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
+ if (cctx->lz4CtxType == ctxFast) {
+ /* lz4 fast*/
+ LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+ } else {
+ /* lz4hc */
+ assert(cctx->lz4CtxType == ctxHC);
+ LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+ }
+ }
+
+ /* Stage 2 : Write Frame Header */

  /* Magic Number */
  LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
  dstPtr += 4;
- headerStart = dstPtr;
-
- /* FLG Byte */
- *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
- + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
- + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
- + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
- + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
- + (cctxPtr->prefs.frameInfo.dictID > 0) );
- /* BD Byte */
- *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
- /* Optional Frame content size field */
- if (cctxPtr->prefs.frameInfo.contentSize) {
- LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
- dstPtr += 8;
- cctxPtr->totalInSize = 0;
- }
- /* Optional dictionary ID field */
- if (cctxPtr->prefs.frameInfo.dictID) {
- LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
- dstPtr += 4;
+ { BYTE* const headerStart = dstPtr;
+
+ /* FLG Byte */
+ *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
+ + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
+ + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+ + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
+ + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+ + (cctx->prefs.frameInfo.dictID > 0) );
+ /* BD Byte */
+ *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
+ /* Optional Frame content size field */
+ if (cctx->prefs.frameInfo.contentSize) {
+ LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
+ dstPtr += 8;
+ cctx->totalInSize = 0;
+ }
+ /* Optional dictionary ID field */
+ if (cctx->prefs.frameInfo.dictID) {
+ LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
+ dstPtr += 4;
+ }
+ /* Header CRC Byte */
+ *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
+ dstPtr++;
  }
- /* Header CRC Byte */
- *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
- dstPtr++;

- cctxPtr->cStage = 1; /* header written, now request input data block */
+ cctx->cStage = 1; /* header written, now request input data block */
  return (size_t)(dstPtr - dstStart);
  }

+ size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* preferencesPtr)
+ {
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ NULL, 0,
+ NULL, preferencesPtr);
+ }

- /*! LZ4F_compressBegin() :
- * init streaming compression and writes frame header into dstBuffer.
- * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * preferencesPtr can be NULL, in which case default parameters are selected.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
- */
- size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
+ /* LZ4F_compressBegin_usingDictOnce:
+ * Hidden implementation,
+ * employed for multi-threaded compression
+ * when frame defines linked blocks */
+ size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
  void* dstBuffer, size_t dstCapacity,
+ const void* dict, size_t dictSize,
+ const LZ4F_preferences_t* preferencesPtr)
+ {
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ dict, dictSize,
+ NULL, preferencesPtr);
+ }
+
+ size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* dict, size_t dictSize,
+ const LZ4F_preferences_t* preferencesPtr)
+ {
+ /* note : incorrect implementation :
+ * this will only use the dictionary once,
+ * instead of once *per* block when frames defines independent blocks */
+ return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
+ dict, dictSize,
+ preferencesPtr);
+ }
+
+ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
  const LZ4F_preferences_t* preferencesPtr)
  {
- return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
- NULL, preferencesPtr);
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ NULL, 0,
+ cdict, preferencesPtr);
  }
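LZ4F_compressBegin_internal() above accepts either a digested @cdict or a raw @dictBuffer, and LZ4F_compressBegin_usingDict() exposes the raw-buffer path. A rough streaming sketch, under the assumption that this entry point is declared in the bundled headers (its signature is taken from this hunk) and that the caller sized dst with LZ4F_compressBound() for each step:

#define LZ4F_STATIC_LINKING_ONLY  /* assumption about where LZ4F_compressBegin_usingDict() is declared */
#include "lz4frame.h"

size_t stream_with_dict(void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                        const void* dict, size_t dictSize)
{
    LZ4F_cctx* cctx = NULL;
    size_t pos = 0, r;

    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;

    /* frame header; the raw dictionary is loaded into the fast or HC context selected by the preferences */
    r = LZ4F_compressBegin_usingDict(cctx, dst, dstCapacity, dict, dictSize, NULL);
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize, NULL);
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);  /* endmark + optional checksum */
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    LZ4F_freeCompressionContext(cctx);
    return pos;

_fail:
    LZ4F_freeCompressionContext(cctx);
    return 0;
}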
 
 
@@ -744,11 +887,13 @@ static size_t LZ4F_makeBlock(void* dst,
  LZ4F_blockChecksum_t crcFlag)
  {
  BYTE* const cSizePtr = (BYTE*)dst;
- U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
- (int)(srcSize), (int)(srcSize-1),
- level, cdict);
- if (cSize == 0) { /* compression failed */
- DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
+ U32 cSize;
+ assert(compress != NULL);
+ cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
+ (int)(srcSize), (int)(srcSize-1),
+ level, cdict);
+
+ if (cSize == 0 || cSize >= srcSize) {
  cSize = (U32)srcSize;
  LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
  memcpy(cSizePtr+BHSize, src, srcSize);
@@ -766,6 +911,7 @@ static size_t LZ4F_makeBlock(void* dst,
  static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  {
  int const acceleration = (level < 0) ? -level + 1 : 1;
+ DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize);
  LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
  if (cdict) {
  return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
@@ -778,6 +924,7 @@ static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, in
  {
  int const acceleration = (level < 0) ? -level + 1 : 1;
  (void)cdict; /* init once at beginning of frame */
+ DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize);
  return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
  }

@@ -796,8 +943,16 @@ static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst,
  return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
  }

- static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
+ static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+ {
+ (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
+ return 0;
+ }
+
+ static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
  {
+ if (compressMode == LZ4B_UNCOMPRESSED)
+ return LZ4F_doNotCompressBlock;
  if (level < LZ4HC_CLEVEL_MIN) {
  if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
  return LZ4F_compressBlock_continue;
@@ -806,6 +961,7 @@ static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int lev
  return LZ4F_compressBlockHC_continue;
  }

+ /* Save history (up to 64KB) into @tmpBuff */
  static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
  {
  if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
@@ -815,38 +971,57 @@ static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)

  typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;

- /*! LZ4F_compressUpdate() :
+ static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
+
+
+ /*! LZ4F_compressUpdateImpl() :
  * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
- * dstBuffer MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
- * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or compressed into @dstBuffer.
+ * If the block compression does not match the compression of the previous block, the old data is flushed
+ * and operations continue with the new compression mode.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
  * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left in a UB state, and must be re-initialized.
  */
- size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
+ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
  const void* srcBuffer, size_t srcSize,
- const LZ4F_compressOptions_t* compressOptionsPtr)
- {
- LZ4F_compressOptions_t cOptionsNull;
+ const LZ4F_compressOptions_t* compressOptionsPtr,
+ LZ4F_BlockCompressMode_e blockCompression)
+ {
  size_t const blockSize = cctxPtr->maxBlockSize;
  const BYTE* srcPtr = (const BYTE*)srcBuffer;
  const BYTE* const srcEnd = srcPtr + srcSize;
  BYTE* const dstStart = (BYTE*)dstBuffer;
  BYTE* dstPtr = dstStart;
  LZ4F_lastBlockStatus lastBlockCompressed = notDone;
- compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
+ compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
+ size_t bytesWritten;
  DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);

- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
+ RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */
  if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- MEM_INIT(&cOptionsNull, 0, sizeof(cOptionsNull));
- if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
+ RETURN_ERROR(dstMaxSize_tooSmall);
+
+ if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
+ RETURN_ERROR(dstMaxSize_tooSmall);
+
+ /* flush currently written block, to continue with new block compression */
+ if (cctxPtr->blockCompressMode != blockCompression) {
+ bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+ dstPtr += bytesWritten;
+ cctxPtr->blockCompressMode = blockCompression;
+ }
+
+ if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;

  /* complete tmp buffer */
  if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
  size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
+ assert(blockSize > cctxPtr->tmpInSize);
  if (sizeToCopy > srcSize) {
  /* add src to tmpIn buffer */
  memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
@@ -864,11 +1039,9 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
  compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
  cctxPtr->cdict,
  cctxPtr->prefs.frameInfo.blockChecksumFlag);
-
  if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
  cctxPtr->tmpInSize = 0;
- }
- }
+ } }

  while ((size_t)(srcEnd - srcPtr) >= blockSize) {
  /* compress full blocks */
@@ -882,33 +1055,38 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
  }

  if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
- /* compress remaining input < blockSize */
+ /* autoFlush : remaining input (< blockSize) is compressed */
  lastBlockCompressed = fromSrcBuffer;
  dstPtr += LZ4F_makeBlock(dstPtr,
  srcPtr, (size_t)(srcEnd - srcPtr),
  compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
  cctxPtr->cdict,
  cctxPtr->prefs.frameInfo.blockChecksumFlag);
- srcPtr = srcEnd;
+ srcPtr = srcEnd;
  }

- /* preserve dictionary if necessary */
+ /* preserve dictionary within @tmpBuff whenever necessary */
  if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
+ /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
+ assert(blockCompression == LZ4B_COMPRESSED);
  if (compressOptionsPtr->stableSrc) {
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */
  } else {
  int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
+ assert(0 <= realDictSize && realDictSize <= 64 KB);
  cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
  }
  }

  /* keep tmpIn within limits */
- if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
- && !(cctxPtr->prefs.autoFlush))
+ if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */
+ && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */
  {
+ /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
+ * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
  int const realDictSize = LZ4F_localSaveDict(cctxPtr);
  cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
  }

  /* some input data left, necessarily < blockSize */
@@ -926,6 +1104,49 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
  return (size_t)(dstPtr - dstStart);
  }

+ /*! LZ4F_compressUpdate() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or compressed into @dstBuffer.
+ * If previously an uncompressed block was written, buffered data is flushed
+ * before appending compressed data is continued.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left in a UB state, and must be re-initialized.
+ */
+ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+ {
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_COMPRESSED);
+ }
+
+ /*! LZ4F_uncompressedUpdate() :
+ * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
+ * This symbol is only supported when LZ4F_blockIndependent is used
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left in a UB state, and must be re-initialized.
+ */
+ size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+ {
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_UNCOMPRESSED);
+ }
+

  /*! LZ4F_flush() :
  * When compressed data must be sent immediately, without waiting for a block to be filled,
@@ -944,13 +1165,12 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
  compressFunc_t compress;

  if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
- if (dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize))
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- (void)compressOptionsPtr; /* not yet useful */
+ RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
+ RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
+ (void)compressOptionsPtr; /* not useful (yet) */

  /* select compression function */
- compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+ compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);

  /* compress tmp buffer */
  dstPtr += LZ4F_makeBlock(dstPtr,
@@ -992,30 +1212,29 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,

  size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
  DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
- if (LZ4F_isError(flushSize)) return flushSize;
+ FORWARD_IF_ERROR(flushSize);
  dstPtr += flushSize;

  assert(flushSize <= dstCapacity);
  dstCapacity -= flushSize;

- if (dstCapacity < 4) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
  LZ4F_writeLE32(dstPtr, 0);
  dstPtr += 4; /* endMark */

  if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
  U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
- if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- DEBUGLOG(5,"Writing 32-bit content checksum");
+ RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
+ DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
  LZ4F_writeLE32(dstPtr, xxh);
  dstPtr+=4; /* content Checksum */
  }

  cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
- cctxPtr->maxBufferSize = 0; /* reuse HC context */

  if (cctxPtr->prefs.frameInfo.contentSize) {
  if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
- return err0r(LZ4F_ERROR_frameSize_wrong);
+ RETURN_ERROR(frameSize_wrong);
  }

  return (size_t)(dstPtr - dstStart);
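LZ4F_uncompressedUpdate(), added a few hunks above, drives the same LZ4F_compressUpdateImpl() path with LZ4B_UNCOMPRESSED, so incompressible payloads can be stored as raw blocks inside the same frame and the frame is still finished by the usual LZ4F_compressEnd(). A sketch of interleaving the two update calls, assuming the frame was opened with LZ4F_blockIndependent (a requirement stated in the comments above) and that dst was sized with LZ4F_compressBound() for each step:

#include "lz4frame.h"

size_t mixed_frame(LZ4F_cctx* cctx, char* dst, size_t cap,
                   const void* text, size_t textSize,    /* compressible part */
                   const void* raw,  size_t rawSize,     /* already-compressed / incompressible part */
                   const LZ4F_preferences_t* prefs)      /* assumed to request LZ4F_blockIndependent */
{
    size_t pos = 0, r;

    r = LZ4F_compressBegin(cctx, dst, cap, prefs);
    if (LZ4F_isError(r)) return 0;
    pos += r;

    r = LZ4F_compressUpdate(cctx, dst + pos, cap - pos, text, textSize, NULL);    /* compressed blocks */
    if (LZ4F_isError(r)) return 0;
    pos += r;

    r = LZ4F_uncompressedUpdate(cctx, dst + pos, cap - pos, raw, rawSize, NULL);  /* raw blocks; the mode switch flushes buffered data first */
    if (LZ4F_isError(r)) return 0;
    pos += r;

    r = LZ4F_compressEnd(cctx, dst + pos, cap - pos, NULL);
    if (LZ4F_isError(r)) return 0;
    return pos + r;
}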
@@ -1039,6 +1258,7 @@ typedef enum {
  } dStage_t;

  struct LZ4F_dctx_s {
+ LZ4F_CustomMem cmem;
  LZ4F_frameInfo_t frameInfo;
  U32 version;
  dStage_t dStage;
@@ -1056,26 +1276,37 @@ struct LZ4F_dctx_s {
  size_t tmpOutStart;
  XXH32_state_t xxh;
  XXH32_state_t blockChecksum;
+ int skipChecksum;
  BYTE header[LZ4F_HEADER_SIZE_MAX];
  }; /* typedef'd to LZ4F_dctx in lz4frame.h */


+ LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+ {
+ LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
+ if (dctx == NULL) return NULL;
+
+ dctx->cmem = customMem;
+ dctx->version = version;
+ return dctx;
+ }
+
  /*! LZ4F_createDecompressionContext() :
  * Create a decompressionContext object, which will track all decompression operations.
  * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
  * Object can later be released using LZ4F_freeDecompressionContext().
  * @return : if != 0, there was an error during context creation.
  */
- LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
+ LZ4F_errorCode_t
+ LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
  {
- LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOC_AND_ZERO(sizeof(LZ4F_dctx));
- if (dctx == NULL) { /* failed allocation */
- *LZ4F_decompressionContextPtr = NULL;
- return err0r(LZ4F_ERROR_allocation_failed);
- }
+ assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */
+ RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */

- dctx->version = versionNumber;
- *LZ4F_decompressionContextPtr = dctx;
+ *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
+ if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */
+ RETURN_ERROR(allocation_failed);
+ }
  return LZ4F_OK_NoError;
  }

@@ -1084,21 +1315,23 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
  LZ4F_errorCode_t result = LZ4F_OK_NoError;
  if (dctx != NULL) { /* can accept NULL input, like free() */
  result = (LZ4F_errorCode_t)dctx->dStage;
- FREEMEM(dctx->tmpIn);
- FREEMEM(dctx->tmpOutBuffer);
- FREEMEM(dctx);
+ LZ4F_free(dctx->tmpIn, dctx->cmem);
+ LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+ LZ4F_free(dctx, dctx->cmem);
  }
  return result;
  }


  /*==--- Streaming Decompression operations ---==*/
-
  void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
  {
+ DEBUGLOG(5, "LZ4F_resetDecompressionContext");
  dctx->dStage = dstage_getFrameHeader;
  dctx->dict = NULL;
  dctx->dictSize = 0;
+ dctx->skipChecksum = 0;
+ dctx->frameRemainingSize = 0;
  }


@@ -1118,7 +1351,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1118
1351
 
1119
1352
  DEBUGLOG(5, "LZ4F_decodeHeader");
1120
1353
  /* need to decode header to get frameInfo */
1121
- if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
1354
+ RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */
1122
1355
  MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
1123
1356
 
1124
1357
  /* special case : skippable frames */
@@ -1132,14 +1365,13 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1132
1365
  } else {
1133
1366
  dctx->dStage = dstage_getSFrameSize;
1134
1367
  return 4;
1135
- }
1136
- }
1368
+ } }
1137
1369
 
1138
1370
  /* control magic number */
1139
1371
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1140
1372
  if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
1141
1373
  DEBUGLOG(4, "frame header error : unknown magic number");
1142
- return err0r(LZ4F_ERROR_frameType_unknown);
1374
+ RETURN_ERROR(frameType_unknown);
1143
1375
  }
1144
1376
  #endif
1145
1377
  dctx->frameInfo.frameType = LZ4F_frame;
@@ -1153,9 +1385,10 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1153
1385
  contentChecksumFlag = (FLG>>2) & _1BIT;
1154
1386
  dictIDFlag = FLG & _1BIT;
1155
1387
  /* validate */
1156
- if (((FLG>>1)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
1157
- if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */
1388
+ if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
1389
+ if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */
1158
1390
  }
1391
+ DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);
1159
1392
 
1160
1393
  /* Frame Header Size */
1161
1394
  frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
@@ -1173,17 +1406,16 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1173
1406
  { U32 const BD = srcPtr[5];
1174
1407
  blockSizeID = (BD>>4) & _3BITS;
1175
1408
  /* validate */
1176
- if (((BD>>7)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
1177
- if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */
1178
- if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
1409
+ if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
1410
+ if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */
1411
+ if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */
1179
1412
  }
1180
1413
 
1181
1414
  /* check header */
1182
1415
  assert(frameHeaderSize > 5);
1183
1416
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1184
1417
  { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
1185
- if (HC != srcPtr[frameHeaderSize-1])
1186
- return err0r(LZ4F_ERROR_headerChecksum_invalid);
1418
+ RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
1187
1419
  }
1188
1420
  #endif
1189
1421
 
@@ -1192,10 +1424,10 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1192
1424
  dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
1193
1425
  dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
1194
1426
  dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
1195
- dctx->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
1196
- if (contentSizeFlag)
1197
- dctx->frameRemainingSize =
1198
- dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
1427
+ dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
1428
+ if (contentSizeFlag) {
1429
+ dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
1430
+ }
1199
1431
  if (dictIDFlag)
1200
1432
  dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
1201
1433
 
@@ -1211,11 +1443,11 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
1211
1443
  */
1212
1444
  size_t LZ4F_headerSize(const void* src, size_t srcSize)
1213
1445
  {
1214
- if (src == NULL) return err0r(LZ4F_ERROR_srcPtr_wrong);
1446
+ RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
1215
1447
 
1216
1448
  /* minimal srcSize to determine header size */
1217
1449
  if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
1218
- return err0r(LZ4F_ERROR_frameHeader_incomplete);
1450
+ RETURN_ERROR(frameHeader_incomplete);
1219
1451
 
1220
1452
  /* special case : skippable frames */
1221
1453
  if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
@@ -1224,7 +1456,7 @@ size_t LZ4F_headerSize(const void* src, size_t srcSize)
1224
1456
  /* control magic number */
1225
1457
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1226
1458
  if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
1227
- return err0r(LZ4F_ERROR_frameType_unknown);
1459
+ RETURN_ERROR(frameType_unknown);
1228
1460
  #endif
1229
1461
 
1230
1462
  /* Frame Header Size */
@@ -1266,13 +1498,13 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
1266
1498
  if (dctx->dStage == dstage_storeFrameHeader) {
1267
1499
  /* frame decoding already started, in the middle of header => automatic fail */
1268
1500
  *srcSizePtr = 0;
1269
- return err0r(LZ4F_ERROR_frameDecoding_alreadyStarted);
1501
+ RETURN_ERROR(frameDecoding_alreadyStarted);
1270
1502
  } else {
1271
1503
  size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
1272
1504
  if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
1273
1505
  if (*srcSizePtr < hSize) {
1274
1506
  *srcSizePtr=0;
1275
- return err0r(LZ4F_ERROR_frameHeader_incomplete);
1507
+ RETURN_ERROR(frameHeader_incomplete);
1276
1508
  }
1277
1509
 
1278
1510
  { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
@@ -1290,16 +1522,14 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
1290
1522
 
1291
1523
  /* LZ4F_updateDict() :
1292
1524
  * only used for LZ4F_blockLinked mode
1293
- * Condition : dstPtr != NULL
1525
+ * Condition : @dstPtr != NULL
1294
1526
  */
1295
1527
  static void LZ4F_updateDict(LZ4F_dctx* dctx,
1296
1528
  const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
1297
1529
  unsigned withinTmp)
1298
1530
  {
1299
1531
  assert(dstPtr != NULL);
1300
- if (dctx->dictSize==0) {
1301
- dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
1302
- }
1532
+ if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */
1303
1533
  assert(dctx->dict != NULL);
1304
1534
 
1305
1535
  if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
@@ -1362,7 +1592,6 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx,
1362
1592
  }
1363
1593
 
1364
1594
 
1365
-
1366
1595
  /*! LZ4F_decompress() :
1367
1596
  * Call this function repetitively to regenerate compressed data in srcBuffer.
1368
1597
  * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
@@ -1398,7 +1627,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
1398
1627
  size_t nextSrcSizeHint = 1;
1399
1628
 
1400
1629
 
1401
- DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
1630
+ DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
1402
1631
  srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
1403
1632
  if (dstBuffer == NULL) assert(*dstSizePtr == 0);
1404
1633
  MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
@@ -1406,6 +1635,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
1406
1635
  *srcSizePtr = 0;
1407
1636
  *dstSizePtr = 0;
1408
1637
  assert(dctx != NULL);
1638
+ dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */
1409
1639
 
1410
1640
  /* behaves as a state machine */
1411
1641
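The new `skipChecksum` field is set once from the decompress options and stays set for the remainder of the frame, disabling block- and content-checksum verification; this is useful when the payload is already integrity-protected at another layer. A hedged sketch of how a caller might opt in (the `skipChecksums` option field is taken from the hunk above; everything else is the regular lz4frame streaming API):

```c
#include <string.h>
#include "lz4frame.h"

/* Decompress one chunk of a frame without checksum verification (trusted input only). */
static size_t decompress_trusted_chunk(LZ4F_dctx* dctx,
                                       void* dst, size_t* dstSize,
                                       const void* src, size_t* srcSize)
{
    LZ4F_decompressOptions_t opts;
    memset(&opts, 0, sizeof(opts));
    opts.skipChecksums = 1;   /* field referenced by this diff; assumed public in this release */
    /* Returns an error code, or a hint of how many src bytes to supply next (0 = frame done). */
    return LZ4F_decompress(dctx, dst, dstSize, src, srcSize, &opts);
}
```

Because the flag is sticky, passing the option on the first call is enough; `LZ4F_resetDecompressionContext()` clears it again at the end of the frame.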
 
@@ -1418,7 +1648,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  DEBUGLOG(6, "dstage_getFrameHeader");
  if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
  size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
- if (LZ4F_isError(hSize)) return hSize;
+ FORWARD_IF_ERROR(hSize);
  srcPtr += hSize;
  break;
  }
@@ -1440,9 +1670,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  doAnotherStage = 0; /* not enough src data, ask for some more */
  break;
  }
- { size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */
- if (LZ4F_isError(hSize)) return hSize;
- }
+ FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */
  break;

  case dstage_init:
@@ -1453,14 +1681,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
  if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */
  dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/
- FREEMEM(dctx->tmpIn);
- dctx->tmpIn = (BYTE*)ALLOC(dctx->maxBlockSize + BFSize /* block checksum */);
- if (dctx->tmpIn == NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
- FREEMEM(dctx->tmpOutBuffer);
- dctx->tmpOutBuffer= (BYTE*)ALLOC(bufferNeeded);
- if (dctx->tmpOutBuffer== NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
+ LZ4F_free(dctx->tmpIn, dctx->cmem);
+ dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
+ RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
+ LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+ dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
+ RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed);
  dctx->maxBufferSize = bufferNeeded;
  } }
  dctx->tmpInSize = 0;
@@ -1509,7 +1735,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  break;
  }
  if (nextCBlockSize > dctx->maxBlockSize) {
- return err0r(LZ4F_ERROR_maxBlockSize_invalid);
+ RETURN_ERROR(maxBlockSize_invalid);
  }
  if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
  /* next block is uncompressed */
@@ -1540,21 +1766,23 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
  sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
  memcpy(dstPtr, srcPtr, sizeToCopy);
- if (dctx->frameInfo.blockChecksumFlag) {
- (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ if (!dctx->skipChecksum) {
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentChecksumFlag)
+ (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
  }
- if (dctx->frameInfo.contentChecksumFlag)
- (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
  if (dctx->frameInfo.contentSize)
  dctx->frameRemainingSize -= sizeToCopy;

  /* history management (linked blocks only)*/
  if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
  LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
- } }
-
- srcPtr += sizeToCopy;
- dstPtr += sizeToCopy;
+ }
+ srcPtr += sizeToCopy;
+ dstPtr += sizeToCopy;
+ }
  if (sizeToCopy == dctx->tmpInTarget) { /* all done */
  if (dctx->frameInfo.blockChecksumFlag) {
  dctx->tmpInSize = 0;
@@ -1590,14 +1818,15 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  }
  crcSrc = dctx->header;
  }
- { U32 const readCRC = LZ4F_readLE32(crcSrc);
+ if (!dctx->skipChecksum) {
+ U32 const readCRC = LZ4F_readLE32(crcSrc);
  U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  DEBUGLOG(6, "compare block checksum");
  if (readCRC != calcCRC) {
  DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
  readCRC, calcCRC);
- return err0r(LZ4F_ERROR_blockChecksum_invalid);
+ RETURN_ERROR(blockChecksum_invalid);
  }
  #else
  (void)readCRC;
@@ -1637,37 +1866,44 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  }

  /* At this stage, input is large enough to decode a block */
+
+ /* First, decode and control block checksum if it exists */
  if (dctx->frameInfo.blockChecksumFlag) {
+ assert(dctx->tmpInTarget >= 4);
  dctx->tmpInTarget -= 4;
  assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
  { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
  U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (readBlockCrc != calcBlockCrc)
- return err0r(LZ4F_ERROR_blockChecksum_invalid);
+ RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
  #else
  (void)readBlockCrc;
  (void)calcBlockCrc;
  #endif
  } }

- if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
+ /* decode directly into destination buffer if there is enough room */
+ if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
+ /* unless the dictionary is stored in tmpOut:
+ * in which case it's faster to decode within tmpOut
+ * to benefit from prefix speedup */
+ && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
+ {
  const char* dict = (const char*)dctx->dict;
  size_t dictSize = dctx->dictSize;
  int decodedSize;
  assert(dstPtr != NULL);
  if (dict && dictSize > 1 GB) {
- /* the dictSize param is an int, avoid truncation / sign issues */
+ /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
  dict += dictSize - 64 KB;
  dictSize = 64 KB;
  }
- /* enough capacity in `dst` to decompress directly there */
  decodedSize = LZ4_decompress_safe_usingDict(
  (const char*)selectedIn, (char*)dstPtr,
  (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
  dict, (int)dictSize);
- if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
- if (dctx->frameInfo.contentChecksumFlag)
+ RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+ if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
  XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
  if (dctx->frameInfo.contentSize)
  dctx->frameRemainingSize -= (size_t)decodedSize;
@@ -1678,25 +1914,27 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  }

  dstPtr += decodedSize;
- dctx->dStage = dstage_getBlockHeader;
+ dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */
  break;
  }

  /* not enough place into dst : decode into tmpOut */
- /* ensure enough place for tmpOut */
+
+ /* manage dictionary */
  if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
  if (dctx->dict == dctx->tmpOutBuffer) {
+ /* truncate dictionary to 64 KB if too big */
  if (dctx->dictSize > 128 KB) {
  memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
  dctx->dictSize = 64 KB;
  }
  dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
- } else { /* dict not within tmp */
+ } else { /* dict not within tmpOut */
  size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
  dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
  } }

- /* Decode block */
+ /* Decode block into tmpOut */
  { const char* dict = (const char*)dctx->dict;
  size_t dictSize = dctx->dictSize;
  int decodedSize;
@@ -1709,9 +1947,8 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  (const char*)selectedIn, (char*)dctx->tmpOut,
  (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
  dict, (int)dictSize);
- if (decodedSize < 0) /* decompression failed */
- return err0r(LZ4F_ERROR_decompressionFailed);
- if (dctx->frameInfo.contentChecksumFlag)
+ RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+ if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
  XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
  if (dctx->frameInfo.contentSize)
  dctx->frameRemainingSize -= (size_t)decodedSize;
@@ -1744,8 +1981,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  break;

  case dstage_getSuffix:
- if (dctx->frameRemainingSize)
- return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */
+ RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */
  if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
  nextSrcSizeHint = 0;
  LZ4F_resetDecompressionContext(dctx);
@@ -1777,20 +2013,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  } /* if (dctx->dStage == dstage_storeSuffix) */

  /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
- { U32 const readCRC = LZ4F_readLE32(selectedIn);
+ if (!dctx->skipChecksum) {
+ U32 const readCRC = LZ4F_readLE32(selectedIn);
  U32 const resultCRC = XXH32_digest(&(dctx->xxh));
+ DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
  #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (readCRC != resultCRC)
- return err0r(LZ4F_ERROR_contentChecksum_invalid);
+ RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
  #else
  (void)readCRC;
  (void)resultCRC;
  #endif
- nextSrcSizeHint = 0;
- LZ4F_resetDecompressionContext(dctx);
- doAnotherStage = 0;
- break;
  }
+ nextSrcSizeHint = 0;
+ LZ4F_resetDecompressionContext(dctx);
+ doAnotherStage = 0;
+ break;

  case dstage_getSFrameSize:
  if ((srcEnd - srcPtr) >= 4) {
@@ -1841,7 +2078,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
  } /* switch (dctx->dStage) */
  } /* while (doAnotherStage) */

- /* preserve history within tmp whenever necessary */
+ /* preserve history within tmpOut whenever necessary */
  LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
  if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
  && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */