extlz4 0.3.4 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/Rakefile +21 -3
- data/contrib/lz4/CODING_STYLE +57 -0
- data/contrib/lz4/LICENSE +1 -1
- data/contrib/lz4/Makefile.inc +17 -15
- data/contrib/lz4/NEWS +25 -0
- data/contrib/lz4/README.md +16 -5
- data/contrib/lz4/SECURITY.md +17 -0
- data/contrib/lz4/build/README.md +4 -15
- data/contrib/lz4/build/VS2022/_build.bat +39 -0
- data/contrib/lz4/build/VS2022/_setup.bat +35 -0
- data/contrib/lz4/build/VS2022/_test.bat +38 -0
- data/contrib/lz4/build/VS2022/build-and-test-win32-debug.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-win32-release.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-x64-debug.bat +26 -0
- data/contrib/lz4/build/VS2022/build-and-test-x64-release.bat +26 -0
- data/contrib/lz4/build/VS2022/datagen/datagen.vcxproj +7 -3
- data/contrib/lz4/build/{VS2017 → VS2022}/lz4/lz4.vcxproj +21 -7
- data/contrib/lz4/build/VS2022/lz4.sln +5 -2
- data/contrib/lz4/build/cmake/CMakeLists.txt +95 -100
- data/contrib/lz4/build/meson/GetLz4LibraryVersion.py +39 -0
- data/contrib/lz4/build/meson/README.md +34 -0
- data/contrib/lz4/build/meson/meson/contrib/gen_manual/meson.build +42 -0
- data/contrib/lz4/build/meson/meson/contrib/meson.build +11 -0
- data/contrib/lz4/build/meson/meson/examples/meson.build +32 -0
- data/contrib/lz4/build/meson/meson/lib/meson.build +87 -0
- data/contrib/lz4/build/meson/meson/meson.build +135 -0
- data/contrib/lz4/build/meson/meson/ossfuzz/meson.build +35 -0
- data/contrib/lz4/build/meson/meson/programs/meson.build +91 -0
- data/contrib/lz4/build/meson/meson/tests/meson.build +162 -0
- data/contrib/lz4/build/meson/meson.build +31 -0
- data/contrib/lz4/build/meson/meson_options.txt +44 -0
- data/contrib/lz4/build/visual/README.md +5 -0
- data/contrib/lz4/build/visual/generate_solution.cmd +55 -0
- data/contrib/lz4/build/visual/generate_vs2015.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2017.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2019.cmd +3 -0
- data/contrib/lz4/build/visual/generate_vs2022.cmd +3 -0
- data/contrib/lz4/lib/README.md +25 -1
- data/contrib/lz4/lib/lz4.c +206 -99
- data/contrib/lz4/lib/lz4.h +111 -69
- data/contrib/lz4/lib/lz4file.c +111 -81
- data/contrib/lz4/lib/lz4file.h +2 -2
- data/contrib/lz4/lib/lz4frame.c +179 -121
- data/contrib/lz4/lib/lz4frame.h +162 -103
- data/contrib/lz4/lib/lz4hc.c +943 -382
- data/contrib/lz4/lib/lz4hc.h +43 -42
- data/contrib/lz4/lib/xxhash.c +21 -21
- data/contrib/lz4/ossfuzz/decompress_fuzzer.c +1 -1
- data/contrib/lz4/ossfuzz/fuzz_helpers.h +1 -1
- data/ext/blockapi.c +11 -11
- data/ext/frameapi.c +23 -23
- metadata +34 -28
- data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +0 -169
- data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +0 -176
- data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +0 -176
- data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +0 -180
- data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +0 -173
- data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +0 -175
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +0 -51
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +0 -179
- data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +0 -189
- data/contrib/lz4/build/VS2010/lz4.sln +0 -98
- data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +0 -173
- data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +0 -180
- data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +0 -180
- data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +0 -184
- data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +0 -177
- data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +0 -179
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +0 -51
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +0 -183
- data/contrib/lz4/build/VS2017/lz4/lz4.rc +0 -51
- data/contrib/lz4/build/VS2017/lz4.sln +0 -103
- data/contrib/lz4/build/{VS2010 → VS2022}/lz4/lz4.rc +0 -0
data/contrib/lz4/lib/lz4.c
CHANGED
@@ -1,6 +1,6 @@
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011-present, Yann Collet.
+   Copyright (C) 2011-2023, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -37,7 +37,8 @@
 **************************************/
 /*
  * LZ4_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
+ * Select how stateless compression functions like `LZ4_compress_default()`
+ * allocate memory for their hash table,
  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
  */
 #ifndef LZ4_HEAPMODE
@@ -78,7 +79,7 @@
   ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
   || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #  define LZ4_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
 #  define LZ4_FORCE_MEMORY_ACCESS 1
 # endif
 #endif
@@ -105,15 +106,13 @@
 #  define LZ4_SRC_INCLUDED 1
 #endif
 
-#ifndef LZ4_STATIC_LINKING_ONLY
-#define LZ4_STATIC_LINKING_ONLY
-#endif
-
 #ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
-#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#  define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
 #endif
 
-#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
+#ifndef LZ4_STATIC_LINKING_ONLY
+#  define LZ4_STATIC_LINKING_ONLY
+#endif
 #include "lz4.h"
 /* see also "memory routines" below */
 
@@ -125,14 +124,17 @@
 #  include <intrin.h>               /* only present in VS2005+ */
 #  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
 #  pragma warning(disable : 6237)   /* disable: C6237: conditional expression is always 0 */
+#  pragma warning(disable : 6239)   /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */
+#  pragma warning(disable : 6240)   /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */
+#  pragma warning(disable : 6326)   /* disable: C6326: Potential comparison of a constant with another constant */
 #endif  /* _MSC_VER */
 
 #ifndef LZ4_FORCE_INLINE
-#  ifdef _MSC_VER    /* Visual Studio */
+#  if defined (_MSC_VER) && !defined (__clang__)    /* MSVC */
 #    define LZ4_FORCE_INLINE static __forceinline
 #  else
 #    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#      ifdef __GNUC__
+#      if defined (__GNUC__) || defined (__clang__)
 #        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
 #      else
 #        define LZ4_FORCE_INLINE static inline
@@ -279,7 +281,7 @@ static const int LZ4_minLength = (MFLIMIT+1);
 static int g_debuglog_enable = 1;
 #  define DEBUGLOG(l, ...) {                      \
     if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
-        fprintf(stderr, __FILE__ ": ");           \
+        fprintf(stderr, __FILE__ " %i: ", __LINE__); \
         fprintf(stderr, __VA_ARGS__);             \
         fprintf(stderr, " \n");                   \
 }   }
@@ -364,6 +366,11 @@ static unsigned LZ4_isLittleEndian(void)
     return one.c[0];
 }
 
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
+#elif defined(_MSC_VER)
+#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
+#endif
 
 #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
 /* lie to the compiler about data alignment; use with caution */
@@ -379,14 +386,16 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
 
 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
 /* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;
+LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
+LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
+LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
 
-static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
-static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
-static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }
+static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }
 
-static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
-static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }
+static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }
 
 #else  /* safe and portable access using memcpy() */
 
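Note: the `LZ4_PACK` rewrite above splits the old catch-all `LZ4_unalign` union into one packed struct per access width. As a minimal standalone sketch of the same packed-struct trick (GCC/Clang attribute syntax shown; MSVC uses the `__pragma(pack)` form from `LZ4_PACK`; the names here are illustrative, not part of lz4):

```c
#include <stdint.h>

/* A struct forced to alignment 1: dereferencing through it tells the
 * compiler the load may be unaligned, so it emits a safe instruction. */
typedef struct { uint32_t u32; } __attribute__((__packed__)) unalign32_t;

static uint32_t read32_unaligned(const void* ptr)
{
    return ((const unalign32_t*)ptr)->u32;   /* unaligned-safe 32-bit load */
}
```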
@@ -424,9 +433,21 @@ static U16 LZ4_readLE16(const void* memPtr)
         return LZ4_read16(memPtr);
     } else {
         const BYTE* p = (const BYTE*)memPtr;
-        return (U16)((U16)p[0] + (p[1]<<8));
+        return (U16)((U16)p[0] | (p[1]<<8));
+    }
+}
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+static U32 LZ4_readLE32(const void* memPtr)
+{
+    if (LZ4_isLittleEndian()) {
+        return LZ4_read32(memPtr);
+    } else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);
     }
 }
+#endif
 
 static void LZ4_writeLE16(void* memPtr, U16 value)
 {
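Note: `LZ4_readLE32()` above is only compiled when `LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT` is defined while building lz4.c (e.g. via `-DLZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT`); together with the hashing change further down, it makes compressed output byte-identical across big- and little-endian hosts. The byte-assembly idiom it relies on, as a standalone sketch:

```c
#include <stdint.h>

/* Assemble a 32-bit little-endian value byte by byte: the result is the
 * same on any host endianness, unlike a raw 4-byte load. */
static uint32_t read_le32(const uint8_t* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}
```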
@@ -509,7 +530,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
 
 /* LZ4_memcpy_using_offset()  presumes :
  * - dstEnd >= dstPtr + MINMATCH
- * - there is at least 8 bytes available to write after dstEnd */
+ * - there is at least 12 bytes available to write after dstEnd */
 LZ4_FORCE_INLINE void
 LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
 {
@@ -524,12 +545,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
     case 2:
         LZ4_memcpy(v, srcPtr, 2);
         LZ4_memcpy(&v[2], srcPtr, 2);
-#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
 #  pragma warning(push)
 #  pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
 #endif
         LZ4_memcpy(&v[4], v, 4);
-#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
 #  pragma warning(pop)
 #endif
         break;
@@ -776,7 +797,12 @@ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
 LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
 {
     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+    return LZ4_hash4(LZ4_readLE32(p), tableType);
+#else
     return LZ4_hash4(LZ4_read32(p), tableType);
+#endif
 }
 
 LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
@@ -803,23 +829,19 @@ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableT
     }
 }
 
+/* LZ4_putPosition*() : only used in byPtr mode */
 LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
-                                  void* tableBase, tableType_t const tableType,
-                            const BYTE* srcBase)
+                                  void* tableBase, tableType_t const tableType)
 {
-    switch (tableType)
-    {
-    case clearedTable: { /* illegal! */ assert(0); return; }
-    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
-    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
-    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
-    }
+    const BYTE** const hashTable = (const BYTE**)tableBase;
+    assert(tableType == byPtr); (void)tableType;
+    hashTable[h] = p;
 }
 
-LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)
 {
     U32 const h = LZ4_hashPosition(p, tableType);
-    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+    LZ4_putPositionOnHash(p, h, tableBase, tableType);
 }
 
 /* LZ4_getIndexOnHash() :
@@ -844,20 +866,18 @@ LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_
     assert(0); return 0;  /* forbidden case */
 }
 
-static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)
 {
-    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
-    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
-    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
+    assert(tableType == byPtr); (void)tableType;
+    { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
 }
 
 LZ4_FORCE_INLINE const BYTE*
 LZ4_getPosition(const BYTE* p,
-                const void* tableBase, tableType_t tableType,
-                const BYTE* srcBase)
+                const void* tableBase, tableType_t tableType)
 {
     U32 const h = LZ4_hashPosition(p, tableType);
-    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+    return LZ4_getPositionOnHash(h, tableBase, tableType);
 }
 
 LZ4_FORCE_INLINE void
@@ -901,9 +921,9 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
         cctx->dictSize = 0;
     }
 
-/** LZ4_compress_generic() :
+/** LZ4_compress_generic_validated() :
  *  inlined, to ensure branches are decided at compilation time.
- *  Presumed already validated at this stage:
+ *  The following conditions are presumed already validated:
  *  - source != NULL
  *  - inputSize > 0
  */
@@ -921,10 +941,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
                  const int acceleration)
 {
     int result;
-    const BYTE* ip = (const BYTE*) source;
+    const BYTE* ip = (const BYTE*)source;
 
     U32 const startIndex = cctx->currentOffset;
-    const BYTE* base = (const BYTE*) source - startIndex;
+    const BYTE* base = (const BYTE*)source - startIndex;
     const BYTE* lowLimit;
 
     const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
@@ -932,7 +952,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
         dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
     const U32 dictSize =
         dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
-    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
+    const U32 dictDelta =
+        (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with indexes in current context */
 
     int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
     U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
@@ -957,11 +978,11 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
 
     DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
     assert(ip != NULL);
+    if (tableType == byU16) assert(inputSize<LZ4_64Klimit);  /* Size too large (not within 64K limit) */
+    if (tableType == byPtr) assert(dictDirective==noDict);   /* only supported use case with byPtr */
     /* If init conditions are not met, we don't have to mark stream
      * as having dirty context, since no action was taken yet */
     if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
-    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }  /* Size too large (not within 64K limit) */
-    if (tableType==byPtr) assert(dictDirective==noDict);      /* only supported use case with byPtr */
     assert(acceleration >= 1);
 
     lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
@@ -981,7 +1002,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
     if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */
 
     /* First Byte */
-    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+    {   U32 const h = LZ4_hashPosition(ip, tableType);
+        if (tableType == byPtr) {
+            LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+        } else {
+            LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
+    }   }
     ip++; forwardH = LZ4_hashPosition(ip, tableType);
 
     /* Main Loop */
@@ -1004,9 +1030,9 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
             if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
             assert(ip < mflimitPlusOne);
 
-            match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+            match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
             forwardH = LZ4_hashPosition(forwardIp, tableType);
-            LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+            LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
 
         } while ( (match+LZ4_DISTANCE_MAX < ip)
                || (LZ4_read32(match) != LZ4_read32(ip)) );
@@ -1077,7 +1103,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
 
         /* Catch up */
         filledIp = ip;
-        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+        assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
+        if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
+            do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
+        }
 
         /* Encode Literals */
         {   unsigned const litLength = (unsigned)(ip - anchor);
@@ -1092,7 +1121,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
                 goto _last_literals;
             }
             if (litLength >= RUN_MASK) {
-                int len = (int)(litLength - RUN_MASK);
+                unsigned len = litLength - RUN_MASK;
                 *token = (RUN_MASK<<ML_BITS);
                 for(; len >= 255 ; len-=255) *op++ = 255;
                 *op++ = (BYTE)len;
@@ -1204,13 +1233,19 @@ _next_match:
         if (ip >= mflimitPlusOne) break;
 
         /* Fill table */
-        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+        {   U32 const h = LZ4_hashPosition(ip-2, tableType);
+            if (tableType == byPtr) {
+                LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);
+            } else {
+                U32 const idx = (U32)((ip-2) - base);
+                LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
+        }   }
 
         /* Test next position */
         if (tableType == byPtr) {
 
-            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
-            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+            match = LZ4_getPosition(ip, cctx->hashTable, tableType);
+            LZ4_putPosition(ip, cctx->hashTable, tableType);
             if ( (match+LZ4_DISTANCE_MAX >= ip)
               && (LZ4_read32(match) == LZ4_read32(ip)) )
                 { token=op++; *token=0; goto _next_match; }
@@ -1224,6 +1259,7 @@ _next_match:
         if (dictDirective == usingDictCtx) {
             if (matchIndex < startIndex) {
                 /* there was no match, try the dictionary */
+                assert(tableType == byU32);
                 matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                 match = dictBase + matchIndex;
                 lowLimit = dictionary;   /* required for match length counter */
@@ -1377,9 +1413,10 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
  */
 int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
 {
-    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+    LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
     if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+    assert(ctx != NULL);
 
     if (dstCapacity >= LZ4_compressBound(srcSize)) {
         if (srcSize < LZ4_64Klimit) {
@@ -1413,17 +1450,17 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
 }
 
 
-int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
 {
     int result;
 #if (LZ4_HEAPMODE)
-    LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
+    LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
     if (ctxPtr == NULL) return 0;
 #else
     LZ4_stream_t ctx;
     LZ4_stream_t* const ctxPtr = &ctx;
 #endif
-    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);
 
 #if (LZ4_HEAPMODE)
     FREEMEM(ctxPtr);
@@ -1432,43 +1469,51 @@ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutp
 }
 
 
-int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
 {
-    return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
+    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
 }
 
 
 /* Note!: This function leaves the stream in an unclean/broken state!
  * It is not safe to subsequently use the same state with a _fastReset() or
  * _continue() call without resetting it. */
-static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
 {
     void* const s = LZ4_initStream(state, sizeof (*state));
     assert(s != NULL); (void)s;
 
     if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
-        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
     } else {
         if (*srcSizePtr < LZ4_64Klimit) {
-            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
-            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
     }   }
 }
 
+int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
+{
+    int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
+    /* clean the state on exit */
+    LZ4_initStream(state, sizeof (LZ4_stream_t));
+    return r;
+}
+
 
 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
 {
 #if (LZ4_HEAPMODE)
-    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
+    LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
     if (ctx == NULL) return 0;
 #else
     LZ4_stream_t ctxBody;
-    LZ4_stream_t* ctx = &ctxBody;
+    LZ4_stream_t* const ctx = &ctxBody;
 #endif
 
-    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+    int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);
 
 #if (LZ4_HEAPMODE)
     FREEMEM(ctx);
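Note: `LZ4_compress_destSize_extState()` is a new public entry point that wraps the `_internal` variant and re-initializes the caller's state on exit, so the state stays usable afterwards. A hedged usage sketch (it assumes the declaration is exposed through lz4.h's static-linking-only section, whose hunk is not shown in this diff; buffer contents are illustrative):

```c
#define LZ4_STATIC_LINKING_ONLY
#include <stdio.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "input that may only partially fit into the destination buffer";
    char dst[32];
    int srcSize = (int)(sizeof(src) - 1);
    LZ4_stream_t state;   /* caller-provided state: no heap allocation */

    /* Compress as much of src as fits into dst; srcSize is updated to the
     * number of input bytes actually consumed. Acceleration 1 = default. */
    int const cSize = LZ4_compress_destSize_extState(&state, src, dst,
                                                     &srcSize, (int)sizeof(dst), 1);
    printf("consumed %d bytes -> %d compressed bytes\n", srcSize, cSize);
    return 0;
}
```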
@@ -1537,14 +1582,17 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
 #endif
 
 
+typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
 #define HASH_UNIT sizeof(reg_t)
-int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
+                    const char* dictionary, int dictSize,
+                    LoadDict_mode_e _ld)
 {
-    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
     const tableType_t tableType = byU32;
     const BYTE* p = (const BYTE*)dictionary;
     const BYTE* const dictEnd = p + dictSize;
-    const BYTE* base;
+    U32 idx32;
 
     DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
 
@@ -1567,19 +1615,46 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
     }
 
     if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
-    base = dictEnd - dict->currentOffset;
     dict->dictionary = p;
     dict->dictSize = (U32)(dictEnd - p);
     dict->tableType = (U32)tableType;
+    idx32 = dict->currentOffset - dict->dictSize;
 
     while (p <= dictEnd-HASH_UNIT) {
-        LZ4_putPosition(p, dict->hashTable, tableType, base);
-        p+=3;
+        U32 const h = LZ4_hashPosition(p, tableType);
+        /* Note: overwriting => favors positions end of dictionary */
+        LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+        p+=3; idx32+=3;
+    }
+
+    if (_ld == _ld_slow) {
+        /* Fill hash table with additional references, to improve compression capability */
+        p = dict->dictionary;
+        idx32 = dict->currentOffset - dict->dictSize;
+        while (p <= dictEnd-HASH_UNIT) {
+            U32 const h = LZ4_hashPosition(p, tableType);
+            U32 const limit = dict->currentOffset - 64 KB;
+            if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
+                /* Note: not overwriting => favors positions beginning of dictionary */
+                LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+            }
+            p++; idx32++;
+        }
     }
 
     return (int)dict->dictSize;
 }
 
+int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
+}
+
+int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
+}
+
 void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
 {
     const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
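Note: `LZ4_loadDict()` keeps the fast stride-3 indexing pass, while the new `LZ4_loadDictSlow()` adds a stride-1 second pass that also preserves references near the beginning of the dictionary, trading load time for compression ratio. A hedged sketch of choosing between them (names and the visibility of `LZ4_loadDictSlow()` in lz4.h are assumptions, since the lz4.h hunk is not shown here):

```c
#define LZ4_STATIC_LINKING_ONLY   /* assumed: LZ4_loadDictSlow() visible here */
#include "lz4.h"

static void prime_stream(LZ4_stream_t* stream,
                         const char* dict, int dictSize,
                         int manyBlocksExpected)
{
    if (manyBlocksExpected) {
        /* load cost amortized over many compress calls: denser hash table */
        LZ4_loadDictSlow(stream, dict, dictSize);
    } else {
        /* fast load: stride-3 pass only */
        LZ4_loadDict(stream, dict, dictSize);
    }
}
```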
@@ -1711,7 +1786,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
 /* Hidden debug function, to force-test external dictionary mode */
 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
 {
-    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+    LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
     int result;
 
     LZ4_renormDictT(streamPtr, srcSize);
@@ -1774,7 +1849,7 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
  *  does not know end of input
  *  presumes input is well formed
  *  note : will consume at least one byte */
-size_t read_long_length_no_check(const BYTE** pp)
+static size_t read_long_length_no_check(const BYTE** pp)
 {
     size_t b, l = 0;
     do { b = **pp; (*pp)++; l += b; } while (b==255);
@@ -1911,6 +1986,17 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
     if (initial_check && unlikely((*ip) >= ilimit)) {    /* read limit reached */
         return rvl_error;
     }
+    s = **ip;
+    (*ip)++;
+    length += s;
+    if (unlikely((*ip) > ilimit)) {    /* read limit reached */
+        return rvl_error;
+    }
+    /* accumulator overflow detection (32-bit mode only) */
+    if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+        return rvl_error;
+    }
+    if (likely(s != 255)) return length;
     do {
         s = **ip;
         (*ip)++;
@@ -1919,10 +2005,10 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
             return rvl_error;
         }
         /* accumulator overflow detection (32-bit mode only) */
-        if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+        if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
             return rvl_error;
         }
-    } while (s==255);
+    } while (s == 255);
 
     return length;
 }
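Note: the two hunks above peel the first iteration out of `read_variable_length()`'s loop, so the common one-extra-byte case returns early, and apply the 32-bit overflow check to every accumulated byte. The byte scheme being parsed, as a standalone sketch (checks omitted for brevity; `read_extra_length` is an illustrative name, not an lz4 symbol):

```c
#include <stddef.h>

/* LZ4 length extension: when the token's length field saturates at 15,
 * extension bytes are summed until one byte is < 255.
 * e.g. 15 (token) + 255 + 255 + 30 encodes a literal run of 555 bytes. */
static size_t read_extra_length(const unsigned char** ip)
{
    size_t total = 0, b;
    do { b = **ip; (*ip)++; total += b; } while (b == 255);
    return total;
}
```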
@@ -1988,63 +2074,73 @@ LZ4_decompress_generic(
      * note : fast loop may show a regression for some client arm chips. */
 #if LZ4_FAST_DEC_LOOP
     if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
-        DEBUGLOG(6, "skip fast decode loop");
+        DEBUGLOG(6, "move to safe decode loop");
         goto safe_decode;
     }
 
     /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+    DEBUGLOG(6, "using fast decode loop");
     while (1) {
         /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
         assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
         assert(ip < iend);
         token = *ip++;
         length = token >> ML_BITS;  /* literal length */
+        DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
 
         /* decode literal length */
         if (length == RUN_MASK) {
             size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
-            if (addl == rvl_error) { goto _output_error; }
+            if (addl == rvl_error) {
+                DEBUGLOG(6, "error reading long literal length");
+                goto _output_error;
+            }
             length += addl;
             if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
             if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
 
             /* copy literals */
-            cpy = op+length;
             LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
-            if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
-            LZ4_wildCopy32(op, ip, cpy);
-            ip += length; op = cpy;
-        } else {
-            cpy = op+length;
-            DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+            if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+            LZ4_wildCopy32(op, ip, op+length);
+            ip += length; op += length;
+        } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {
             /* We don't need to check oend, since we check it once for each loop below */
-            if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+            DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
             /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
             LZ4_memcpy(op, ip, 16);
-            ip += length; op = cpy;
+            ip += length; op += length;
+        } else {
+            goto safe_literal_copy;
         }
 
         /* get offset */
         offset = LZ4_readLE16(ip); ip+=2;
+        DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset);
         match = op - offset;
         assert(match <= op);  /* overflow check */
 
         /* get matchlength */
         length = token & ML_MASK;
+        DEBUGLOG(7, "  match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH);
 
         if (length == ML_MASK) {
             size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
-            if (addl == rvl_error) { goto _output_error; }
+            if (addl == rvl_error) {
+                DEBUGLOG(5, "error reading long match length");
+                goto _output_error;
+            }
             length += addl;
             length += MINMATCH;
+            DEBUGLOG(7, "  long match length == %u", (unsigned)length);
             if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
-            if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
             if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
                 goto safe_match_copy;
             }
         } else {
             length += MINMATCH;
             if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+                DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length);
                 goto safe_match_copy;
             }
 
@@ -2062,7 +2158,10 @@ LZ4_decompress_generic(
                     continue;
             }   }   }
 
-            if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+            if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
+                DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
+                goto _output_error;
+            }
             /* match starting within external dictionary */
             if ((dict==usingExtDict) && (match < lowPrefix)) {
                 assert(dictEnd != NULL);
@@ -2071,7 +2170,8 @@ LZ4_decompress_generic(
                     DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
                     length = MIN(length, (size_t)(oend-op));
                 } else {
-                    goto _output_error;   /* end-of-block condition violated */
+                    DEBUGLOG(6, "end-of-block condition violated")
+                    goto _output_error;
                 }   }
 
             if (length <= (size_t)(lowPrefix-match)) {
@@ -2111,10 +2211,12 @@ LZ4_decompress_generic(
 #endif
 
         /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+        DEBUGLOG(6, "using safe decode loop");
         while (1) {
             assert(ip < iend);
             token = *ip++;
             length = token >> ML_BITS;  /* literal length */
+            DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
 
             /* A two-stage shortcut for the most common case:
              * 1) If the literal length is 0..14, and there is enough space,
@@ -2135,6 +2237,7 @@ LZ4_decompress_generic(
             /* The second stage: prepare for match copying, decode full info.
              * If it doesn't work out, the info won't be wasted. */
             length = token & ML_MASK; /* match length */
+            DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);
             offset = LZ4_readLE16(ip); ip += 2;
             match = op - offset;
             assert(match <= op); /* check overflow */
@@ -2166,11 +2269,12 @@ LZ4_decompress_generic(
                 if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
             }
 
-            /* copy literals */
-            cpy = op+length;
 #if LZ4_FAST_DEC_LOOP
         safe_literal_copy:
 #endif
+            /* copy literals */
+            cpy = op+length;
+
             LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
             if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
                 /* We've either hit the input parsing restriction or the output parsing restriction.
@@ -2206,9 +2310,10 @@ LZ4_decompress_generic(
                  * so check that we exactly consume the input and don't overrun the output buffer.
                  */
                 if ((ip+length != iend) || (cpy > oend)) {
-                    DEBUGLOG(6, "should have been last run of literals")
-                    DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
-                    DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+                    DEBUGLOG(5, "should have been last run of literals")
+                    DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+                    DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", cpy, oend-MFLIMIT);
+                    DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize);
                     goto _output_error;
                 }
             }
@@ -2234,6 +2339,7 @@ LZ4_decompress_generic(
 
             /* get matchlength */
             length = token & ML_MASK;
+            DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
 
 _copy_match:
             if (length == ML_MASK) {
|
@@ -2323,7 +2429,7 @@ LZ4_decompress_generic(
|
|
2323
2429
|
while (op < cpy) { *op++ = *match++; }
|
2324
2430
|
} else {
|
2325
2431
|
LZ4_memcpy(op, match, 8);
|
2326
|
-
if (length > 16)
|
2432
|
+
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
|
2327
2433
|
}
|
2328
2434
|
op = cpy; /* wildcopy correction */
|
2329
2435
|
}
|
@@ -2418,6 +2524,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                      int compressedSize, int maxOutputSize,
                                      const void* dictStart, size_t dictSize)
 {
+    DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                   decode_full_block, usingExtDict,
                                   (BYTE*)dest, (const BYTE*)dictStart, dictSize);