extlz4 0.3.4 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/Rakefile +21 -3
- data/contrib/lz4/CODING_STYLE +57 -0
- data/contrib/lz4/LICENSE +1 -1
- data/contrib/lz4/Makefile.inc +17 -15
- data/contrib/lz4/NEWS +25 -0
- data/contrib/lz4/README.md +16 -5
- data/contrib/lz4/SECURITY.md +17 -0
- data/contrib/lz4/build/README.md +4 -15
- data/contrib/lz4/build/VS2022/_build.bat +39 -0
- data/contrib/lz4/build/VS2022/_setup.bat +35 -0
- data/contrib/lz4/build/VS2022/_test.bat +38 -0
- data/contrib/lz4/build/VS2022/build-and-test-win32-debug.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-win32-release.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-x64-debug.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-x64-release.bat +26 -0
- data/contrib/lz4/build/VS2022/datagen/datagen.vcxproj +7 -3
- data/contrib/lz4/build/{VS2017 → VS2022}/lz4/lz4.vcxproj +21 -7
- data/contrib/lz4/build/VS2022/lz4.sln +5 -2
- data/contrib/lz4/build/cmake/CMakeLists.txt +95 -100
- data/contrib/lz4/build/meson/GetLz4LibraryVersion.py +39 -0
- data/contrib/lz4/build/meson/README.md +34 -0
- data/contrib/lz4/build/meson/meson/contrib/gen_manual/meson.build +42 -0
- data/contrib/lz4/build/meson/meson/contrib/meson.build +11 -0
- data/contrib/lz4/build/meson/meson/examples/meson.build +32 -0
- data/contrib/lz4/build/meson/meson/lib/meson.build +87 -0
- data/contrib/lz4/build/meson/meson/meson.build +135 -0
- data/contrib/lz4/build/meson/meson/ossfuzz/meson.build +35 -0
- data/contrib/lz4/build/meson/meson/programs/meson.build +91 -0
- data/contrib/lz4/build/meson/meson/tests/meson.build +162 -0
- data/contrib/lz4/build/meson/meson.build +31 -0
- data/contrib/lz4/build/meson/meson_options.txt +44 -0
- data/contrib/lz4/build/visual/README.md +5 -0
- data/contrib/lz4/build/visual/generate_solution.cmd +55 -0
- data/contrib/lz4/build/visual/generate_vs2015.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2017.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2019.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2022.cmd +3 -0
- data/contrib/lz4/lib/README.md +25 -1
- data/contrib/lz4/lib/lz4.c +206 -99
- data/contrib/lz4/lib/lz4.h +111 -69
- data/contrib/lz4/lib/lz4file.c +111 -81
- data/contrib/lz4/lib/lz4file.h +2 -2
- data/contrib/lz4/lib/lz4frame.c +179 -121
- data/contrib/lz4/lib/lz4frame.h +162 -103
- data/contrib/lz4/lib/lz4hc.c +943 -382
- data/contrib/lz4/lib/lz4hc.h +43 -42
- data/contrib/lz4/lib/xxhash.c +21 -21
- data/contrib/lz4/ossfuzz/decompress_fuzzer.c +1 -1
- data/contrib/lz4/ossfuzz/fuzz_helpers.h +1 -1
- data/ext/blockapi.c +11 -11
- data/ext/frameapi.c +23 -23
- metadata +34 -28
- data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +0 -169
- data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +0 -176
- data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +0 -176
- data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +0 -180
- data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +0 -173
- data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +0 -175
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +0 -51
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +0 -179
- data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +0 -189
- data/contrib/lz4/build/VS2010/lz4.sln +0 -98
- data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +0 -173
- data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +0 -180
- data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +0 -180
- data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +0 -184
- data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +0 -177
- data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +0 -179
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +0 -51
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +0 -183
- data/contrib/lz4/build/VS2017/lz4/lz4.rc +0 -51
- data/contrib/lz4/build/VS2017/lz4.sln +0 -103
- /data/contrib/lz4/build/{VS2010 → VS2022}/lz4/lz4.rc +0 -0
data/contrib/lz4/lib/lz4frame.c
CHANGED
@@ -44,6 +44,7 @@
 /*-************************************
 *  Compiler Options
 **************************************/
+#include <limits.h>
 #ifdef _MSC_VER    /* Visual Studio */
 #  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
 #endif
@@ -54,8 +55,8 @@
 **************************************/
 /*
  * LZ4F_HEAPMODE :
- *
- *
+ * Control how LZ4F_compressFrame allocates the Compression State,
+ * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
  */
 #ifndef LZ4F_HEAPMODE
 #  define LZ4F_HEAPMODE 0
@@ -125,8 +126,9 @@ static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
 
 static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
 {
-
+    if (p == NULL) return;
     if (cmem.customFree != NULL) {
+        /* custom allocation defined : use it */
         cmem.customFree(cmem.opaqueState, p);
         return;
     }
@@ -153,7 +155,7 @@ static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
 static int g_debuglog_enable = 1;
 #  define DEBUGLOG(l, ...) {                              \
         if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {      \
-            fprintf(stderr, __FILE__ ": ");               \
+            fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \
             fprintf(stderr, __VA_ARGS__);                 \
             fprintf(stderr, " \n");                       \
     }   }
@@ -186,9 +188,9 @@ static U32 LZ4F_readLE32 (const void* src)
 {
     const BYTE* const srcPtr = (const BYTE*)src;
     U32 value32 = srcPtr[0];
-    value32 += ((U32)srcPtr[1])<< 8;
-    value32 += ((U32)srcPtr[2])<<16;
-    value32 += ((U32)srcPtr[3])<<24;
+    value32 |= ((U32)srcPtr[1])<< 8;
+    value32 |= ((U32)srcPtr[2])<<16;
+    value32 |= ((U32)srcPtr[3])<<24;
     return value32;
 }
 
@@ -205,13 +207,13 @@ static U64 LZ4F_readLE64 (const void* src)
 {
     const BYTE* const srcPtr = (const BYTE*)src;
     U64 value64 = srcPtr[0];
-    value64 += ((U64)srcPtr[1]<<8);
-    value64 += ((U64)srcPtr[2]<<16);
-    value64 += ((U64)srcPtr[3]<<24);
-    value64 += ((U64)srcPtr[4]<<32);
-    value64 += ((U64)srcPtr[5]<<40);
-    value64 += ((U64)srcPtr[6]<<48);
-    value64 += ((U64)srcPtr[7]<<56);
+    value64 |= ((U64)srcPtr[1]<<8);
+    value64 |= ((U64)srcPtr[2]<<16);
+    value64 |= ((U64)srcPtr[3]<<24);
+    value64 |= ((U64)srcPtr[4]<<32);
+    value64 |= ((U64)srcPtr[5]<<40);
+    value64 |= ((U64)srcPtr[6]<<48);
+    value64 |= ((U64)srcPtr[7]<<56);
     return value64;
 }
 
@@ -257,14 +259,15 @@ static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE;  /* block footer : checks
  * Structures and local types
 **************************************/
 
-typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED}
+typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
+typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
 
 typedef struct LZ4F_cctx_s
 {
     LZ4F_CustomMem cmem;
     LZ4F_preferences_t prefs;
     U32    version;
-    U32    cStage;
+    U32    cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */
     const LZ4F_CDict* cdict;
     size_t maxBlockSize;
     size_t maxBufferSize;
@@ -275,8 +278,8 @@ typedef struct LZ4F_cctx_s
     XXH32_state_t xxh;
     void*  lz4CtxPtr;
     U16    lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
-    U16
-
+    U16    lz4CtxType;  /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+    LZ4F_BlockCompressMode_e blockCompressMode;
 } LZ4F_cctx_t;
 
 
@@ -314,9 +317,14 @@ static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
 
 #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
 
-#define RETURN_ERROR_IF(c,e)
+#define RETURN_ERROR_IF(c,e) do {       \
+        if (c) {                        \
+            DEBUGLOG(3, "Error: " #c);  \
+            RETURN_ERROR(e);            \
+        }                               \
+    } while (0)
 
-#define FORWARD_IF_ERROR(r)
+#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
 
 unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
 
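The rewritten `RETURN_ERROR_IF()` and `FORWARD_IF_ERROR()` macros above now expand to a `do { ... } while (0)` block. That is the standard C idiom for making a multi-statement macro behave as one statement, so it composes safely with an unbraced `if`/`else`. A minimal sketch (the `CHECK_*` macros below are hypothetical and not part of lz4):

```c
#include <stdio.h>

/* Brace-only version: the trailing ';' after a use breaks a following `else`. */
#define CHECK_BAD(cond)  { if (cond) { puts("bad input"); return -1; } }

/* do/while(0) version: expands to a single statement, so `if (x) CHECK_GOOD(y); else ...` parses. */
#define CHECK_GOOD(cond) do { if (cond) { puts("bad input"); return -1; } } while (0)

static int parse(int v)
{
    if (v < 0)
        CHECK_GOOD(1);   /* one statement; the `else` below still binds to the outer `if` */
    else
        puts("ok");
    return 0;
}

int main(void) { return parse(-1) ? 1 : 0; }
```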
@@ -429,6 +437,7 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
     BYTE* dstPtr = dstStart;
     BYTE* const dstEnd = dstStart + dstCapacity;
 
+    DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
     if (preferencesPtr!=NULL)
         prefs = *preferencesPtr;
     else
@@ -494,7 +503,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
         LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
         cctxPtr->lz4CtxPtr = &lz4ctx;
         cctxPtr->lz4CtxAlloc = 1;
-        cctxPtr->
+        cctxPtr->lz4CtxType = ctxFast;
     }
 #endif
     DEBUGLOG(4, "LZ4F_compressFrame");
@@ -539,18 +548,19 @@ LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t di
         dictSize = 64 KB;
     }
     cdict->dictContent = LZ4F_malloc(dictSize, cmem);
+    /* note: using @cmem to allocate => can't use default create */
     cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
-    if (cdict->fastCtx)
-        LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
     cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
-    if (cdict->HCCtx)
-        LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t));
     if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
         LZ4F_freeCDict(cdict);
         return NULL;
     }
     memcpy(cdict->dictContent, dictStart, dictSize);
-
+    LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
+    LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+    LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
+    /* note: we don't know at this point which compression level is going to be used
+     * as a consequence, HCCtx is created for the more common HC mode */
     LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
     LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
     return cdict;
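For orientation, the `LZ4F_CDict` reworked above (stream initialization deferred until all allocations succeed, and `LZ4_loadDictSlow()` used for the fast context) is the frame API's pre-digested dictionary object. A hedged usage sketch, assuming the static-linking-only declarations in `lz4frame.h`; names and error handling are illustrative, not taken from this diff:

```c
#include <stdlib.h>
#define LZ4F_STATIC_LINKING_ONLY   /* CDict entry points live in the static-only part of lz4frame.h */
#include "lz4frame.h"

/* Illustrative helper: compress one buffer into an LZ4 frame using a digested dictionary. */
int compress_with_cdict(const void* dictData, size_t dictSize,
                        const void* src, size_t srcSize,
                        void** outBuf, size_t* outSize)
{
    LZ4F_cctx* cctx = NULL;
    LZ4F_CDict* cdict = LZ4F_createCDict(dictData, dictSize);
    size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);
    void* dst = malloc(dstCapacity);
    size_t r = 0;
    int ok = 0;

    if (cdict && dst
        && !LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
        /* one-shot frame compression, reusing the digested dictionary */
        r = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                          src, srcSize, cdict, NULL);
        ok = !LZ4F_isError(r);
    }
    LZ4F_freeCompressionContext(cctx);   /* free on NULL is supported, per the hunk below */
    LZ4F_freeCDict(cdict);
    if (!ok) { free(dst); return -1; }
    *outBuf = dst; *outSize = r;
    return 0;
}
```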
@@ -616,7 +626,6 @@ LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned v
     return LZ4F_OK_NoError;
 }
 
-
 LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
 {
     if (cctxPtr != NULL) {  /* support free on NULL */
@@ -641,7 +650,7 @@ static void LZ4F_initStream(void* ctx,
                             int level,
                             LZ4F_blockMode_t blockMode) {
     if (level < LZ4HC_CLEVEL_MIN) {
-        if (cdict
+        if (cdict || blockMode == LZ4F_blockLinked) {
             /* In these cases, we will call LZ4_compress_fast_continue(),
              * which needs an already reset context. Otherwise, we'll call a
              * one-shot API. The non-continued APIs internally perform their own
@@ -649,11 +658,18 @@ static void LZ4F_initStream(void* ctx,
              * tableType they need the context to be in. So in that case this
              * would be misguided / wasted work. */
             LZ4_resetStream_fast((LZ4_stream_t*)ctx);
+            if (cdict)
+                LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
         }
-
+        /* In these cases, we'll call a one-shot API.
+         * The non-continued APIs internally perform their own resets
+         * at the beginning of their calls, where they know
+         * which tableType they need the context to be in.
+         * Therefore, a reset here would be wasted work. */
     } else {
         LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
-
+        if (cdict)
+            LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
     }
 }
 
@@ -668,14 +684,12 @@ static int ctxTypeID_to_size(int ctxTypeID) {
     }
 }
 
-
- *
- * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into @dstBuffer for the header
- *           or an error code (can be tested using LZ4F_isError())
+/* LZ4F_compressBegin_internal()
+ * Note: only accepts @cdict _or_ @dictBuffer as non NULL.
  */
-size_t
+size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
                           void* dstBuffer, size_t dstCapacity,
+                          const void* dictBuffer, size_t dictSize,
                           const LZ4F_CDict* cdict,
                           const LZ4F_preferences_t* preferencesPtr)
 {
@@ -685,71 +699,85 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
 
     RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
     if (preferencesPtr == NULL) preferencesPtr = &prefNull;
-
+    cctx->prefs = *preferencesPtr;
 
     /* cctx Management */
-    {   U16 const ctxTypeID = (
+    {   U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
         int requiredSize = ctxTypeID_to_size(ctxTypeID);
-        int allocatedSize = ctxTypeID_to_size(
+        int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
         if (allocatedSize < requiredSize) {
             /* not enough space allocated */
-            LZ4F_free(
-            if (
+            LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
+            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                 /* must take ownership of memory allocation,
                  * in order to respect custom allocator contract */
-
-                if (
-                    LZ4_initStream(
+                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
+                if (cctx->lz4CtxPtr)
+                    LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
             } else {
-
-                if (
-                    LZ4_initStreamHC(
+                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
+                if (cctx->lz4CtxPtr)
+                    LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
             }
-            RETURN_ERROR_IF(
-
-
-        } else if (
+            RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
+            cctx->lz4CtxAlloc = ctxTypeID;
+            cctx->lz4CtxType = ctxTypeID;
+        } else if (cctx->lz4CtxType != ctxTypeID) {
             /* otherwise, a sufficient buffer is already allocated,
              * but we need to reset it to the correct context type */
-            if (
-                LZ4_initStream((LZ4_stream_t*)
+            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+                LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
             } else {
-                LZ4_initStreamHC((LZ4_streamHC_t*)
-                LZ4_setCompressionLevel((LZ4_streamHC_t*)
+                LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+                LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
             }
-
+            cctx->lz4CtxType = ctxTypeID;
     }   }
 
     /* Buffer Management */
-    if (
-
-
+    if (cctx->prefs.frameInfo.blockSizeID == 0)
+        cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+    cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);
 
     {   size_t const requiredBuffSize = preferencesPtr->autoFlush ?
-            ((
-
-
-        if (
-
-            LZ4F_free(
-
-            RETURN_ERROR_IF(
-
+            ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) :  /* only needs past data up to window size */
+            cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
+
+        if (cctx->maxBufferSize < requiredBuffSize) {
+            cctx->maxBufferSize = 0;
+            LZ4F_free(cctx->tmpBuff, cctx->cmem);
+            cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
+            RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
+            cctx->maxBufferSize = requiredBuffSize;
     }   }
-
-
-    (void)XXH32_reset(&(
+    cctx->tmpIn = cctx->tmpBuff;
+    cctx->tmpInSize = 0;
+    (void)XXH32_reset(&(cctx->xxh), 0);
 
     /* context init */
-
-    if (
+    cctx->cdict = cdict;
+    if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
         /* frame init only for blockLinked : blockIndependent will be init at each block */
-        LZ4F_initStream(
+        LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
     }
     if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
-        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)
+        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+    }
+    if (dictBuffer) {
+        assert(cdict == NULL);
+        RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
+        if (cctx->lz4CtxType == ctxFast) {
+            /* lz4 fast*/
+            LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+        } else {
+            /* lz4hc */
+            assert(cctx->lz4CtxType == ctxHC);
+            LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+        }
     }
 
+    /* Stage 2 : Write Frame Header */
+
     /* Magic Number */
     LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
     dstPtr += 4;
@@ -757,22 +785,22 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
 
     /* FLG Byte */
     *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)    /* Version('01') */
-        + ((
-        + ((
-        + ((unsigned)(
-        + ((
-        + (
+        + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
+        + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+        + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
+        + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+        + (cctx->prefs.frameInfo.dictID > 0) );
     /* BD Byte */
-    *dstPtr++ = (BYTE)((
+    *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
     /* Optional Frame content size field */
-    if (
-        LZ4F_writeLE64(dstPtr,
+    if (cctx->prefs.frameInfo.contentSize) {
+        LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
         dstPtr += 8;
-
+        cctx->totalInSize = 0;
     }
     /* Optional dictionary ID field */
-    if (
-        LZ4F_writeLE32(dstPtr,
+    if (cctx->prefs.frameInfo.dictID) {
+        LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
         dstPtr += 4;
     }
     /* Header CRC Byte */
@@ -780,24 +808,54 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
         dstPtr++;
     }
 
-
+    cctx->cStage = 1;   /* header written, now request input data block */
     return (size_t)(dstPtr - dstStart);
 }
 
+size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
+                          void* dstBuffer, size_t dstCapacity,
+                          const LZ4F_preferences_t* preferencesPtr)
+{
+    return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+                                       NULL, 0,
+                                       NULL, preferencesPtr);
+}
 
-
- *
- *
- *
- *
-
-
-
+/* LZ4F_compressBegin_usingDictOnce:
+ * Hidden implementation,
+ * employed for multi-threaded compression
+ * when frame defines linked blocks */
+size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
+                          void* dstBuffer, size_t dstCapacity,
+                          const void* dict, size_t dictSize,
+                          const LZ4F_preferences_t* preferencesPtr)
+{
+    return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+                                       dict, dictSize,
+                                       NULL, preferencesPtr);
+}
+
+size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
+                          void* dstBuffer, size_t dstCapacity,
+                          const void* dict, size_t dictSize,
+                          const LZ4F_preferences_t* preferencesPtr)
+{
+    /* note : incorrect implementation :
+     * this will only use the dictionary once,
+     * instead of once *per* block when frames defines independent blocks */
+    return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
+                                            dict, dictSize,
+                                            preferencesPtr);
+}
+
+size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
                           void* dstBuffer, size_t dstCapacity,
+                          const LZ4F_CDict* cdict,
                           const LZ4F_preferences_t* preferencesPtr)
 {
-    return
-
+    return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+                                       NULL, 0,
+                                       cdict, preferencesPtr);
 }
 
 
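The `LZ4F_compressBegin_internal()` fan-out above also introduces `LZ4F_compressBegin_usingDict()`, which starts a frame with a raw dictionary buffer instead of a digested `LZ4F_CDict`. A hedged sketch of the resulting streaming call sequence, assuming the symbol is exposed through the static-linking-only section of `lz4frame.h`; buffer sizing and error handling are abbreviated and illustrative:

```c
#include <stdlib.h>
#define LZ4F_STATIC_LINKING_ONLY   /* assumption: the _usingDict entry point is static-only API */
#include "lz4frame.h"

/* Illustrative streaming loop: start a frame with a raw dictionary, then feed one input chunk.
 * dstCapacity is assumed >= LZ4F_HEADER_SIZE_MAX + LZ4F_compressBound(srcSize, NULL). */
size_t compress_stream_with_dict(const void* dict, size_t dictSize,
                                 const void* src, size_t srcSize,
                                 void* dst, size_t dstCapacity)
{
    LZ4F_cctx* cctx = NULL;
    size_t pos = 0, r;

    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;

    r = LZ4F_compressBegin_usingDict(cctx, dst, dstCapacity, dict, dictSize, NULL);
    if (!LZ4F_isError(r)) {
        pos += r;
        r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize, NULL);
    }
    if (!LZ4F_isError(r)) {
        pos += r;
        r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);
    }
    LZ4F_freeCompressionContext(cctx);
    return LZ4F_isError(r) ? 0 : pos + r;   /* total frame size, or 0 on error */
}
```

As the in-diff comment notes, this path currently applies the dictionary only once per frame, so with independent blocks only the first block benefits; decompression of such frames needs the same dictionary on the reading side.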
@@ -891,9 +949,10 @@ static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int sr
     return 0;
 }
 
-static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level,
+static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
 {
-    if (compressMode == LZ4B_UNCOMPRESSED)
+    if (compressMode == LZ4B_UNCOMPRESSED)
+        return LZ4F_doNotCompressBlock;
     if (level < LZ4HC_CLEVEL_MIN) {
         if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
         return LZ4F_compressBlock_continue;
@@ -931,7 +990,7 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
                     void* dstBuffer, size_t dstCapacity,
                     const void* srcBuffer, size_t srcSize,
                     const LZ4F_compressOptions_t* compressOptionsPtr,
-
+                    LZ4F_BlockCompressMode_e blockCompression)
 {
     size_t const blockSize = cctxPtr->maxBlockSize;
     const BYTE* srcPtr = (const BYTE*)srcBuffer;
@@ -951,10 +1010,10 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
         RETURN_ERROR(dstMaxSize_tooSmall);
 
     /* flush currently written block, to continue with new block compression */
-    if (cctxPtr->
+    if (cctxPtr->blockCompressMode != blockCompression) {
         bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
         dstPtr += bytesWritten;
-        cctxPtr->
+        cctxPtr->blockCompressMode = blockCompression;
     }
 
     if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;
@@ -1068,13 +1127,9 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
                                    compressOptionsPtr, LZ4B_COMPRESSED);
 }
 
-/*!
- * LZ4F_compressUpdate()
- *
- * src data is either buffered or compressed into @dstBuffer.
- * If previously an uncompressed block was written, buffered data is flushed
- * before appending compressed data is continued.
- * This is only supported when LZ4F_blockIndependent is used
+/*! LZ4F_uncompressedUpdate() :
+ *  Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
+ *  This symbol is only supported when LZ4F_blockIndependent is used
  * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
  * @compressOptionsPtr is optional : provide NULL to mean "default".
  * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
@@ -1084,8 +1139,8 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
 size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
                                void* dstBuffer, size_t dstCapacity,
                                const void* srcBuffer, size_t srcSize,
-                               const LZ4F_compressOptions_t* compressOptionsPtr)
-
+                               const LZ4F_compressOptions_t* compressOptionsPtr)
+{
     return LZ4F_compressUpdateImpl(cctxPtr,
                                    dstBuffer, dstCapacity,
                                    srcBuffer, srcSize,
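`LZ4F_uncompressedUpdate()` above shares `LZ4F_compressUpdateImpl()` with `LZ4F_compressUpdate()`; switching between the two mid-frame forces a flush of the pending block (see the `blockCompressMode` handling earlier). A hedged sketch of interleaving stored and compressed blocks in one frame; identifiers are illustrative and not taken from the diff:

```c
#include <string.h>
#include "lz4frame.h"

/* Hedged sketch: one frame mixing normal and stored (uncompressed) blocks. */
size_t mix_blocks(LZ4F_cctx* cctx, char* dst, size_t dstCap,
                  const void* text, size_t textLen,   /* compressible payload */
                  const void* jpeg, size_t jpegLen)   /* already-compressed payload */
{
    LZ4F_preferences_t prefs;
    size_t pos = 0, r;
    memset(&prefs, 0, sizeof(prefs));
    prefs.frameInfo.blockMode = LZ4F_blockIndependent;  /* required by LZ4F_uncompressedUpdate() */

    r = LZ4F_compressBegin(cctx, dst, dstCap, &prefs);
    if (LZ4F_isError(r)) return 0;
    pos += r;
    r = LZ4F_compressUpdate(cctx, dst + pos, dstCap - pos, text, textLen, NULL);
    if (LZ4F_isError(r)) return 0;
    pos += r;
    r = LZ4F_uncompressedUpdate(cctx, dst + pos, dstCap - pos, jpeg, jpegLen, NULL);  /* stored as-is */
    if (LZ4F_isError(r)) return 0;
    pos += r;
    r = LZ4F_compressEnd(cctx, dst + pos, dstCap - pos, NULL);
    return LZ4F_isError(r) ? 0 : pos + r;   /* total frame size, or 0 on error */
}
```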
@@ -1115,7 +1170,7 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
     (void)compressOptionsPtr;   /* not useful (yet) */
 
     /* select compression function */
-    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->
+    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);
 
     /* compress tmp buffer */
     dstPtr += LZ4F_makeBlock(dstPtr,
@@ -1170,13 +1225,12 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
     if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
         U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
         RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
-        DEBUGLOG(5,"Writing 32-bit content checksum");
+        DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
         LZ4F_writeLE32(dstPtr, xxh);
         dstPtr+=4;   /* content Checksum */
     }
 
     cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */
-    cctxPtr->maxBufferSize = 0;  /* reuse HC context */
 
     if (cctxPtr->prefs.frameInfo.contentSize) {
         if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
@@ -1270,13 +1324,14 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
 
 
 /*==---   Streaming Decompression operations   ---==*/
-
 void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
 {
+    DEBUGLOG(5, "LZ4F_resetDecompressionContext");
     dctx->dStage = dstage_getFrameHeader;
     dctx->dict = NULL;
     dctx->dictSize = 0;
     dctx->skipChecksum = 0;
+    dctx->frameRemainingSize = 0;
 }
 
 
@@ -1333,6 +1388,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
         if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
         if (version != 1) RETURN_ERROR(headerVersion_wrong);       /* Version Number, only supported value */
     }
+    DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);
 
     /* Frame Header Size */
     frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
@@ -1369,8 +1425,9 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
     dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
     dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
     dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
-    if (contentSizeFlag)
+    if (contentSizeFlag) {
         dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
+    }
     if (dictIDFlag)
         dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
 
@@ -1570,7 +1627,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
     size_t nextSrcSizeHint = 1;
 
 
-    DEBUGLOG(5, "LZ4F_decompress
+    DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
             srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
     if (dstBuffer == NULL) assert(*dstSizePtr == 0);
     MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
@@ -1722,10 +1779,10 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
             /* history management (linked blocks only)*/
             if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                 LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
-
-
-
-
+            }
+            srcPtr += sizeToCopy;
+            dstPtr += sizeToCopy;
+        }
             if (sizeToCopy == dctx->tmpInTarget) {   /* all done */
                 if (dctx->frameInfo.blockChecksumFlag) {
                     dctx->tmpInSize = 0;
@@ -1959,6 +2016,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
             if (!dctx->skipChecksum) {
                 U32 const readCRC = LZ4F_readLE32(selectedIn);
                 U32 const resultCRC = XXH32_digest(&(dctx->xxh));
+                DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
 #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                 RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
 #else