extlz4 0.2.5 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/HISTORY.ja.md +16 -1
- data/README.md +49 -51
- data/Rakefile +22 -0
- data/bin/extlz4 +1 -1
- data/contrib/lz4/LICENSE +2 -1
- data/contrib/lz4/Makefile.inc +111 -0
- data/contrib/lz4/NEWS +97 -0
- data/contrib/lz4/README.md +41 -36
- data/contrib/lz4/build/README.md +55 -0
- data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +169 -0
- data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +176 -0
- data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +176 -0
- data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +180 -0
- data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +173 -0
- data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +175 -0
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +51 -0
- data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +179 -0
- data/contrib/lz4/build/VS2010/lz4/lz4.rc +51 -0
- data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +189 -0
- data/contrib/lz4/build/VS2010/lz4.sln +98 -0
- data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +173 -0
- data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +180 -0
- data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +180 -0
- data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +184 -0
- data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +177 -0
- data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +179 -0
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +51 -0
- data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +183 -0
- data/contrib/lz4/build/VS2017/lz4/lz4.rc +51 -0
- data/contrib/lz4/build/VS2017/lz4/lz4.vcxproj +175 -0
- data/contrib/lz4/build/VS2017/lz4.sln +103 -0
- data/contrib/lz4/build/VS2022/datagen/datagen.vcxproj +173 -0
- data/contrib/lz4/build/VS2022/frametest/frametest.vcxproj +180 -0
- data/contrib/lz4/build/VS2022/fullbench/fullbench.vcxproj +180 -0
- data/contrib/lz4/build/VS2022/fullbench-dll/fullbench-dll.vcxproj +184 -0
- data/contrib/lz4/build/VS2022/fuzzer/fuzzer.vcxproj +177 -0
- data/contrib/lz4/build/VS2022/liblz4/liblz4.vcxproj +179 -0
- data/contrib/lz4/build/VS2022/liblz4-dll/liblz4-dll.rc +51 -0
- data/contrib/lz4/build/VS2022/liblz4-dll/liblz4-dll.vcxproj +183 -0
- data/contrib/lz4/build/VS2022/lz4.sln +103 -0
- data/contrib/lz4/build/cmake/CMakeLists.txt +273 -0
- data/contrib/lz4/build/cmake/lz4Config.cmake.in +2 -0
- data/contrib/lz4/lib/LICENSE +1 -1
- data/contrib/lz4/lib/README.md +111 -15
- data/contrib/lz4/lib/liblz4-dll.rc.in +35 -0
- data/contrib/lz4/lib/liblz4.pc.in +3 -3
- data/contrib/lz4/lib/lz4.c +1891 -733
- data/contrib/lz4/lib/lz4.h +597 -234
- data/contrib/lz4/lib/lz4file.c +311 -0
- data/contrib/lz4/lib/lz4file.h +93 -0
- data/contrib/lz4/lib/lz4frame.c +896 -493
- data/contrib/lz4/lib/lz4frame.h +408 -107
- data/contrib/lz4/lib/lz4frame_static.h +5 -112
- data/contrib/lz4/lib/lz4hc.c +1039 -301
- data/contrib/lz4/lib/lz4hc.h +264 -123
- data/contrib/lz4/lib/xxhash.c +376 -240
- data/contrib/lz4/lib/xxhash.h +128 -93
- data/contrib/lz4/ossfuzz/Makefile +79 -0
- data/contrib/lz4/ossfuzz/compress_frame_fuzzer.c +48 -0
- data/contrib/lz4/ossfuzz/compress_fuzzer.c +58 -0
- data/contrib/lz4/ossfuzz/compress_hc_fuzzer.c +64 -0
- data/contrib/lz4/ossfuzz/decompress_frame_fuzzer.c +75 -0
- data/contrib/lz4/ossfuzz/decompress_fuzzer.c +78 -0
- data/contrib/lz4/ossfuzz/fuzz.h +48 -0
- data/contrib/lz4/ossfuzz/fuzz_data_producer.c +77 -0
- data/contrib/lz4/ossfuzz/fuzz_data_producer.h +36 -0
- data/contrib/lz4/ossfuzz/fuzz_helpers.h +95 -0
- data/contrib/lz4/ossfuzz/lz4_helpers.c +51 -0
- data/contrib/lz4/ossfuzz/lz4_helpers.h +13 -0
- data/contrib/lz4/ossfuzz/ossfuzz.sh +23 -0
- data/contrib/lz4/ossfuzz/round_trip_frame_fuzzer.c +43 -0
- data/contrib/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c +134 -0
- data/contrib/lz4/ossfuzz/round_trip_fuzzer.c +117 -0
- data/contrib/lz4/ossfuzz/round_trip_hc_fuzzer.c +44 -0
- data/contrib/lz4/ossfuzz/round_trip_stream_fuzzer.c +302 -0
- data/contrib/lz4/ossfuzz/standaloneengine.c +74 -0
- data/contrib/lz4/ossfuzz/travisoss.sh +26 -0
- data/ext/blockapi.c +13 -48
- data/ext/extlz4.c +2 -0
- data/ext/extlz4.h +17 -0
- data/ext/frameapi.c +3 -14
- data/ext/hashargs.c +9 -3
- data/ext/hashargs.h +1 -1
- data/ext/lz4_amalgam.c +0 -23
- data/gemstub.rb +5 -16
- data/lib/extlz4/oldstream.rb +1 -1
- data/lib/extlz4.rb +51 -3
- data/test/common.rb +2 -2
- metadata +84 -16
- data/contrib/lz4/circle.yml +0 -38
- data/contrib/lz4/lib/lz4opt.h +0 -356
- data/lib/extlz4/version.rb +0 -3
data/contrib/lz4/lib/lz4frame.c
CHANGED
@@ -1,65 +1,73 @@
  - The BSD 2-Clause license header (lines 2-33, Copyright (C) 2011-2016, Yann Collet, with the LZ4 homepage and source-repository links) is reformatted from bare text into `*`-prefixed block-comment lines; the wording is unchanged.
  - The "LZ4F is a stand-alone API to create LZ4-compressed Frames" comment now states full conformance with specification v1.6.1, and notes that the library relies on memory management (malloc, free) provided either by <stdlib.h> or redirected towards another library of the user's choice (see Memory Routines below).
  - The MSVC `# pragma warning(disable : 4127)` gains the comment "disable: C4127: conditional expression is constant".
  - Tuning parameters: adds LZ4F_HEAPMODE (default 0), selecting whether the default compression functions allocate their hash table on the memory stack (0, fastest) or on the heap via malloc() (1).
  - Library declarations: the frame header is now pulled in with LZ4F_STATIC_LINKING_ONLY defined before including "lz4frame.h", and LZ4_STATIC_LINKING_ONLY is defined before "lz4.h"; the LZ4_HC_STATIC_LINKING_ONLY / "lz4hc.h" pair is unchanged.
@@ -67,11 +75,92 @@ You can contact the author at :
  - New "Memory routines" section: users may redirect malloc(), calloc() and free() towards another library or solution of their choice by modifying this section. <string.h> is included for memset/memcpy/memmove, and MEM_INIT, ALLOC, ALLOC_AND_ZERO and FREEMEM are defined under `#ifndef LZ4_SRC_INCLUDED` to avoid redefinition when sources are coalesced.
  - Adds static helpers LZ4F_calloc(), LZ4F_malloc() and LZ4F_free(), each taking an LZ4F_CustomMem argument: they dispatch to customCalloc / customAlloc / customFree with opaqueState when defined, fall back to <stdlib.h> otherwise, and emulate calloc with customAlloc plus MEM_INIT when only a custom alloc is provided.
  - Debug section: pulls in <assert.h> when LZ4_DEBUG >= 1 (assert() becomes a no-op otherwise) and adds a DEBUGLOG(level, ...) macro printing to stderr when LZ4_DEBUG >= 2; LZ4F_STATIC_ASSERT is unchanged.
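The hunk above only shows how the allocator indirection is wired inside lz4frame.c. As a minimal sketch (not part of the package), this is how a caller might route LZ4F's internal allocations through its own callbacks; it assumes the LZ4F_CustomMem declarations exposed by lz4frame.h under LZ4F_STATIC_LINKING_ONLY, with callback shapes inferred from the `cmem.customAlloc(opaqueState, size)` / `cmem.customFree(opaqueState, ptr)` calls visible in this hunk, and it uses LZ4F_createCompressionContext_advanced(), which appears later in this diff.

```c
/* Sketch: route LZ4F's internal allocations through user callbacks.
 * Assumes the LZ4F_STATIC_LINKING_ONLY declarations of lz4frame.h. */
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t g_requested = 0;   /* total bytes requested so far (illustrative only) */

static void* counting_alloc(void* opaque, size_t size)
{
    (void)opaque;
    g_requested += size;
    return malloc(size);
}

static void counting_free(void* opaque, void* ptr)
{
    (void)opaque;
    free(ptr);
}

int main(void)
{
    LZ4F_CustomMem cmem;
    LZ4F_cctx* cctx;

    memset(&cmem, 0, sizeof(cmem));   /* customCalloc left NULL: LZ4F_calloc() falls back to alloc + memset */
    cmem.customAlloc = counting_alloc;
    cmem.customFree  = counting_free;
    cmem.opaqueState = NULL;

    cctx = LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
    if (cctx == NULL) return 1;
    printf("LZ4F requested %zu bytes during context creation\n", g_requested);
    LZ4F_freeCompressionContext(cctx);
    return 0;
}
```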
@@ -92,13 +181,13 @@ You can contact the author at :
  - LZ4F_readLE32(): the comment is completed ("unoptimized version; solves endianness & alignment issues") and the byte reads are cast before shifting: `value32 += ((U32)srcPtr[1])<< 8;` and `value32 += ((U32)srcPtr[2])<<16;`.
@@ -143,9 +232,11 @@ static void LZ4F_writeLE64 (void* dst, U64 value64)
  - Constants: the KB, MB and GB macros are now wrapped in `#ifndef LZ4_SRC_INCLUDED` to avoid double definition when the sources are amalgamated.
@@ -153,34 +244,39 @@ static void LZ4F_writeLE64 (void* dst, U64 value64)
  - The local LZ4F_MAGIC_SKIPPABLE_START (0x184D2A50U) and LZ4F_MAGICNUMBER (0x184D2204U) defines are removed; LZ4F_BLOCKUNCOMPRESSED_FLAG and LZ4F_BLOCKSIZEID_DEFAULT remain.
  - minFHSize is now expressed as LZ4F_HEADER_SIZE_MIN (7), BHSize as LZ4F_BLOCK_HEADER_SIZE, and a new BFSize constant (LZ4F_BLOCK_CHECKSUM_SIZE) covers the optional block-checksum footer.
  - New enum LZ4F_blockCompression_t { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED }.
  - struct LZ4F_cctx_s gains an LZ4F_CustomMem cmem member, explanatory comments on tmpBuff / tmpIn / tmpInSize, a pair of U16 fields lz4CtxAlloc and lz4CtxState (0 = none, 1 = lz4 ctx, 2 = lz4hc ctx) in place of the old single context-level field, and an LZ4F_blockCompression_t blockCompression member.
@@ -209,33 +305,39 @@ LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
  - The internal error constructor is renamed LZ4F_returnErrorCode(), and three helper macros are introduced: RETURN_ERROR(e), RETURN_ERROR_IF(c,e) and FORWARD_IF_ERROR(r).
  - LZ4F_getBlockSize() is no longer a private static helper: it now takes an LZ4F_blockSizeID_t, maps 0 to LZ4F_BLOCKSIZEID_DEFAULT, rejects IDs outside LZ4F_max64KB..LZ4F_max4MB with RETURN_ERROR(maxBlockSize_invalid), and otherwise returns 64 KB, 256 KB, 1 MB or 4 MB.
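Because LZ4F_getBlockSize() loses its `static` qualifier here and reports failures through the error-code machinery, a caller can use it as in the short sketch below (assuming the prototype is now exported by lz4frame.h, which this diff suggests but does not show):

```c
/* Sketch: LZ4F_getBlockSize() returns either a block size in bytes or an
 * LZ4F error code, so the result should be screened with LZ4F_isError(). */
#include <stdio.h>
#include "lz4frame.h"

static void print_block_size(LZ4F_blockSizeID_t id)
{
    size_t const r = LZ4F_getBlockSize(id);
    if (LZ4F_isError(r))
        printf("id %d -> error: %s\n", (int)id, LZ4F_getErrorName(r));
    else
        printf("id %d -> %zu bytes\n", (int)id, r);
}

int main(void)
{
    print_block_size(LZ4F_max64KB);   /* 64 KB */
    print_block_size(LZ4F_max4MB);    /* 4 MB  */
    print_block_size(LZ4F_default);   /* 0 is treated as the 64 KB default */
    return 0;
}
```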
@@ -270,9 +372,9 @@ static size_t LZ4F_compressBound_internal(size_t srcSize,
  - prefsNull is initialized with LZ4F_INIT_PREFERENCES instead of memset(), and the worst case now also enables blockChecksumFlag in addition to contentChecksumFlag.
@@ -281,15 +383,14 @@ static size_t LZ4F_compressBound_internal(size_t srcSize,
  - partialBlockSize is computed as `maxSrcSize & (blockSize-1)`; the per-block overhead uses the named constants (blockCRCSize = BFSize * blockChecksumFlag, frameEnd = BHSize + contentChecksumFlag * BFSize); the bound becomes `((BHSize + blockCRCSize) * nbBlocks) + (blockSize * nbFullBlocks) + lastBlockSize + frameEnd`.
@@ -300,7 +401,7 @@ size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* prefere
  - LZ4F_compressFrameBound(): the default preferences are cleared with MEM_INIT() instead of memset().
@@ -316,27 +417,22 @@ size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* prefere
  - LZ4F_compressFrame_usingCDict() now receives an LZ4F_cctx* as its first argument instead of building a local LZ4F_cctx_t and an on-stack LZ4_stream_t itself; the default preferences and options are cleared with MEM_INIT().
@@ -345,33 +441,27 @@ size_t LZ4F_compressFrame_usingCDict(void* dstBuffer, size_t dstCapacity,
  - The capacity check becomes RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall). The header, payload and suffix are produced by LZ4F_compressBegin_usingCDict(), LZ4F_compressUpdate() and LZ4F_compressEnd() on the supplied cctx, each guarded by FORWARD_IF_ERROR() and by asserts on the destination bounds; the function returns (size_t)(dstPtr - dstStart).
@@ -386,9 +476,42 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
  - LZ4F_compressFrame(): when LZ4F_HEAPMODE is enabled, a context is obtained from LZ4F_createCompressionContext(); otherwise an LZ4F_cctx_t and an LZ4_stream_t live on the stack (MEM_INIT + LZ4_initStream, version set to LZ4F_VERSION, maxBufferSize forced to 5 MB to prevent dynamic allocation, which only works because autoflush==1 and stableSrc==1), with the fast context wired in through lz4CtxPtr / lz4CtxAlloc / lz4CtxState when the level is below LZ4HC_CLEVEL_MIN.
  - The work is delegated to LZ4F_compressFrame_usingCDict(cctxPtr, ..., NULL, preferencesPtr); afterwards the heap context is freed, or, in stack mode with an HC-level request, the heap-allocated lz4 context is released with LZ4F_free(). A DEBUGLOG(4, "LZ4F_compressFrame") trace is added.
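From the caller's point of view the one-shot path is unchanged by this refactor; a minimal sketch using only the public lz4frame.h functions referenced in this hunk (LZ4F_compressFrameBound, LZ4F_compressFrame, LZ4F_isError, LZ4F_getErrorName):

```c
/* Minimal one-shot sketch: LZ4F_compressFrame() with a bound-sized buffer. */
#include <stdio.h>
#include <stdlib.h>
#include "lz4frame.h"

int main(void)
{
    const char src[] = "lz4frame one-shot example, repeated: lz4frame one-shot example";
    size_t const srcSize = sizeof(src);
    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);  /* NULL prefs = worst-case defaults */
    void* const dst = malloc(bound);
    if (dst == NULL) return 1;

    {   size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(cSize)) {
            fprintf(stderr, "compression failed: %s\n", LZ4F_getErrorName(cSize));
            free(dst);
            return 1;
        }
        printf("%zu -> %zu bytes\n", srcSize, cSize);
    }
    free(dst);
    return 0;
}
```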
@@ -397,48 +520,61 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
  - struct LZ4F_CDict_s gains an LZ4F_CustomMem cmem member.
  - New LZ4F_createCDict_advanced(LZ4F_CustomMem, dictBuffer, dictSize): allocates the CDict, its dictionary copy and both streaming contexts through LZ4F_malloc(); only the last 64 KB of the dictionary are retained; the fast and HC contexts are set up with LZ4_initStream() / LZ4_initStreamHC(), the HC context gets LZ4HC_CLEVEL_DEFAULT via LZ4_setCompressionLevel(), and the dictionary is loaded with LZ4_loadDict() / LZ4_loadDictHC(). Any allocation failure frees the partial CDict and returns NULL.
  - LZ4F_createCDict() keeps its documented contract (digest the dictionary once, share it read-only across threads, dictBuffer may be released after creation) and now simply forwards to LZ4F_createCDict_advanced(LZ4F_defaultCMem, ...).
  - LZ4F_freeCDict() releases dictContent, fastCtx, HCCtx and the CDict itself through LZ4F_free() with the stored cmem.
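The lifecycle described in this hunk (digest once, reuse read-only, free at the end) looks roughly like the sketch below. It assumes the dictionary entry points sit in the LZ4F_STATIC_LINKING_ONLY part of lz4frame.h, and it uses LZ4F_compressBegin_usingCDict() with the signature shown later in this diff; the function name `compress_header_with_dict` is illustrative only.

```c
/* Sketch of the CDict lifecycle: digest the dictionary once, share it
 * read-only across compressions, release it with LZ4F_freeCDict(). */
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"

int compress_header_with_dict(const void* dictBuf, size_t dictSize,
                              void* dst, size_t dstCapacity)
{
    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize);  /* only the last 64 KB are retained */
    LZ4F_cctx* cctx = NULL;
    size_t headerSize;

    if (cdict == NULL) return -1;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
        LZ4F_freeCDict(cdict);
        return -1;
    }
    /* writes the frame header; dstCapacity must be >= LZ4F_HEADER_SIZE_MAX */
    headerSize = LZ4F_compressBegin_usingCDict(cctx, dst, dstCapacity, cdict, NULL);

    /* ... LZ4F_compressUpdate() / LZ4F_compressEnd() would follow here ... */

    LZ4F_freeCompressionContext(cctx);
    LZ4F_freeCDict(cdict);
    return LZ4F_isError(headerSize) ? -1 : (int)headerSize;
}
```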
@@ -446,6 +582,20 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
  - New LZ4F_createCompressionContext_advanced(LZ4F_CustomMem, version): zero-allocates the cctx with LZ4F_calloc(), stores the allocator and version, and sets cStage = 0 (uninitialized; next stage is cctx init).
@@ -453,39 +603,75 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
  - LZ4F_createCompressionContext() asserts a non-NULL result pointer (and, in production, returns parameter_null), delegates to the _advanced variant with LZ4F_defaultCMem, and reports allocation_failed when that returns NULL.
  - LZ4F_freeCompressionContext() now takes an LZ4F_cctx* directly and releases lz4CtxPtr, tmpBuff and the context itself through LZ4F_free() (LZ4_stream_t and LZ4_streamHC_t are simple POD types).
  - New static LZ4F_initStream(ctx, cdict, level, blockMode): prepares the internal LZ4(HC) stream for a new compression and attaches the dictionary. For fast levels it calls LZ4_resetStream_fast() only when LZ4_compress_fast_continue() will be used (a cdict is present or blockLinked mode), then LZ4_attach_dictionary(); for HC levels it calls LZ4_resetStreamHC_fast() and LZ4_attach_HC_dictionary(). It must run at the beginning of each independent compression stream (per frame in blockLinked mode, per block in blockIndependent mode).
  - New static ctxTypeID_to_size(): 1 -> LZ4_sizeofState(), 2 -> LZ4_sizeofStateHC(), otherwise 0.
  - The LZ4F_compressBegin_usingCDict() comment now states that it both initializes streaming compression and writes the frame header into dstBuffer, and that dstCapacity must be >= LZ4F_HEADER_SIZE_MAX.
@@ -493,26 +679,45 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
  - prefNull is initialized with LZ4F_INIT_PREFERENCES, and the function starts with RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall).
  - New cctx-management block: the required context type (1 = fast, 2 = HC, chosen by compressionLevel vs LZ4HC_CLEVEL_MIN) is compared with the current allocation via ctxTypeID_to_size(). If the existing allocation is too small, the old context is released with LZ4F_free() and a new LZ4_stream_t or LZ4_streamHC_t is allocated with LZ4F_malloc() and initialized with LZ4_initStream() / LZ4_initStreamHC() (allocation_failed on NULL), so memory ownership stays with the custom allocator. If the allocation is large enough but typed differently, the buffer is re-initialized in place and, for HC, LZ4_setCompressionLevel() is applied. lz4CtxAlloc and lz4CtxState are updated accordingly.
@@ -521,79 +726,71 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
  - Buffer management: with autoFlush the internal buffer only needs the 64 KB window, and only in blockLinked mode; otherwise it needs maxBlockSize plus 128 KB in blockLinked mode. When the current buffer is too small it is released with LZ4F_free() and re-allocated with LZ4F_calloc(), failing with allocation_failed; the XXH32_reset() result is explicitly cast to void.
  - Stream initialization for blockLinked frames now goes through LZ4F_initStream() (replacing the old memcpy of the cdict contexts and the LZ4_resetStream() / LZ4_resetStreamHC() calls), and for HC levels LZ4_favorDecompressionSpeed() is applied according to prefs.favorDecSpeed.
  - Frame-header emission is regrouped into one block after the magic number: the FLG byte packs version '01', blockMode, blockChecksumFlag, a content-size flag, contentChecksumFlag and a dictID flag; the BD byte carries blockSizeID; the optional 8-byte content size and 4-byte dictID follow; the header-checksum byte is computed with LZ4F_headerChecksum() over (size_t)(dstPtr - headerStart). The function returns (size_t)(dstPtr - dstStart) and cStage moves to 1.
  - The LZ4F_compressBegin() comment is updated to match: it writes the frame header, dstCapacity must be >= LZ4F_HEADER_SIZE_MAX, and preferencesPtr may be NULL to select default parameters.
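As a worked example of the FLG packing shown above (not code from the package): for version '01', independent blocks, content checksum enabled, and no block checksum, content size or dictID, the descriptor byte is 0x40 | 0x20 | 0x04 = 0x64.

```c
/* Worked example mirroring the FLG-byte shifts in the hunk above. */
unsigned char example_flg(void)
{
    unsigned const version          = 1;  /* '01' in bits 7-6 */
    unsigned const blockIndependent = 1;  /* bit 5 */
    unsigned const blockChecksum    = 0;  /* bit 4 */
    unsigned const contentSizeFlag  = 0;  /* bit 3 */
    unsigned const contentChecksum  = 1;  /* bit 2 */
    unsigned const dictIDFlag       = 0;  /* bit 0 */
    return (unsigned char)((version << 6) | (blockIndependent << 5) | (blockChecksum << 4)
                         | (contentSizeFlag << 3) | (contentChecksum << 2) | dictIDFlag);  /* 0x64 */
}
```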
@@ -604,13 +801,16 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
  - LZ4F_compressBound(): the comment is restored (returns the minimum dstBuffer capacity for a given srcSize in the worst case; the LZ4F_preferences_t structure is optional and NULL selects the worst case; the function cannot fail), and when the preferences request autoFlush the bound is now computed with no already-buffered data instead of the (size_t)-1 worst case.
@@ -619,55 +819,64 @@ typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize
  - LZ4F_makeBlock() now also receives the LZ4F_CDict* and an LZ4F_blockChecksum_t flag, and its contract is stated as "dst capacity >= BHSize + srcSize + crcSize". When the compressor returns 0 or does not shrink the data, the block is stored raw with LZ4F_BLOCKUNCOMPRESSED_FLAG set in the 4-byte header; otherwise the compressed size is written. If the checksum flag is set, an XXH32 of the compressed payload is appended, and the function returns BHSize + cSize + crcFlag*BFSize.
  - LZ4F_compressBlock() and LZ4F_compressBlockHC() now call LZ4F_initStream() and use LZ4_compress_fast_extState_fastReset() / LZ4_compress_HC_extStateHC_fastReset() for the no-dictionary path instead of memcpy-ing the cdict state into the working context; negative compression levels map to acceleration = -level + 1, and DEBUGLOG(5, ...) traces are added. The _continue variants keep using the stream initialized once per frame.
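For clarity, the block framing that LZ4F_makeBlock() produces is a 4-byte little-endian size word, with the high bit (LZ4F_BLOCKUNCOMPRESSED_FLAG, 0x80000000U from the constants above) marking a stored block, followed by the payload and an optional XXH32 footer. A small self-contained sketch of the stored-block case, with write_le32() as a local helper rather than an LZ4 API:

```c
/* Sketch of the uncompressed ("stored") block layout used by LZ4F_makeBlock(). */
#include <stdint.h>
#include <string.h>

static void write_le32(unsigned char* p, uint32_t v)
{
    p[0] = (unsigned char)(v      );
    p[1] = (unsigned char)(v >>  8);
    p[2] = (unsigned char)(v >> 16);
    p[3] = (unsigned char)(v >> 24);
}

/* dst must have room for 4 + srcSize bytes (plus 4 more if a block checksum follows). */
static size_t store_block(unsigned char* dst, const void* src, uint32_t srcSize)
{
    write_le32(dst, srcSize | 0x80000000u);   /* LZ4F_BLOCKUNCOMPRESSED_FLAG */
    memcpy(dst + 4, src, srcSize);
    return 4 + (size_t)srcSize;
}
```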
@@ -676,8 +885,15 @@ static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst,
  - New LZ4F_doNotCompressBlock() stub returning 0 (the block will be stored uncompressed).
  - LZ4F_selectCompression() gains an LZ4F_blockCompression_t parameter and returns LZ4F_doNotCompressBlock when LZ4B_UNCOMPRESSED is requested; the fast/HC selection logic is otherwise unchanged.
@@ -695,36 +912,57 @@ static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
  - LZ4F_localSaveDict() is documented as saving up to 64 KB of history into tmpBuff.
  - A static k_cOptionsNull replaces the per-call zeroed options struct.
  - The body of LZ4F_compressUpdate() becomes the internal LZ4F_compressUpdateImpl(cctx, dst, dstCapacity, src, srcSize, options, blockCompression). It selects the compressor via LZ4F_selectCompression() with the requested block-compression mode, requires cStage == 1 (compressionState_uninitialized otherwise), checks dstCapacity against LZ4F_compressBound_internal() and, for LZ4B_UNCOMPRESSED, against srcSize (dstMaxSize_tooSmall), and when the block-compression mode differs from the previous block it first flushes the buffered data with LZ4F_flush() and records the new mode. After an error the state is left unspecified and must be re-initialized.
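The refactor above (continued in the next hunk) is what allows compressed and uncompressed updates to be interleaved on one frame. A hedged streaming sketch follows; it assumes LZ4F_uncompressedUpdate() is declared by lz4frame.h (behind LZ4F_STATIC_LINKING_ONLY in this release), that blockIndependent mode is selected as the next hunk requires, and that the caller sizes dstCapacity via LZ4F_compressBound() for each chunk.

```c
/* Streaming sketch: mix LZ4F_compressUpdate() and LZ4F_uncompressedUpdate()
 * on the same frame. Returns the number of bytes written, or 0 on error. */
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"

size_t mixed_frame(void* dst, size_t dstCapacity,
                   const void* textPart, size_t textSize,      /* compressible data      */
                   const void* randomPart, size_t randomSize)  /* already high-entropy   */
{
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
    LZ4F_cctx* cctx = NULL;
    unsigned char* out = (unsigned char*)dst;
    size_t r, pos = 0;

    prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* required for the uncompressed path */

    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;

    r = LZ4F_compressBegin(cctx, out, dstCapacity, &prefs);               /* frame header */
    if (LZ4F_isError(r)) goto done;
    pos += r;

    r = LZ4F_compressUpdate(cctx, out + pos, dstCapacity - pos,
                            textPart, textSize, NULL);                    /* compressed blocks */
    if (LZ4F_isError(r)) goto done;
    pos += r;

    r = LZ4F_uncompressedUpdate(cctx, out + pos, dstCapacity - pos,
                                randomPart, randomSize, NULL);            /* stored blocks */
    if (LZ4F_isError(r)) goto done;
    pos += r;

    r = LZ4F_compressEnd(cctx, out + pos, dstCapacity - pos, NULL);       /* end mark + checksum */
    if (LZ4F_isError(r)) goto done;
    pos += r;

done:
    LZ4F_freeCompressionContext(cctx);
    return LZ4F_isError(r) ? 0 : pos;
}
```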
@@ -737,130 +975,202 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
|
|
737
975
|
memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
|
738
976
|
srcPtr += sizeToCopy;
|
739
977
|
|
740
|
-
dstPtr += LZ4F_makeBlock(dstPtr,
|
978
|
+
dstPtr += LZ4F_makeBlock(dstPtr,
|
979
|
+
cctxPtr->tmpIn, blockSize,
|
741
980
|
compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
|
742
|
-
cctxPtr->cdict,
|
743
|
-
|
981
|
+
cctxPtr->cdict,
|
982
|
+
                                     cctxPtr->prefs.frameInfo.blockChecksumFlag);
            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
            cctxPtr->tmpInSize = 0;
-
-        }
+    }   }
 
    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
        /* compress full blocks */
        lastBlockCompressed = fromSrcBuffer;
-       dstPtr += LZ4F_makeBlock(dstPtr,
+       dstPtr += LZ4F_makeBlock(dstPtr,
+                                srcPtr, blockSize,
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
-                                cctxPtr->cdict,
+                                cctxPtr->cdict,
+                                cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr += blockSize;
    }
 
    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
-       /*
+       /* autoFlush : remaining input (< blockSize) is compressed */
        lastBlockCompressed = fromSrcBuffer;
-       dstPtr += LZ4F_makeBlock(dstPtr,
+       dstPtr += LZ4F_makeBlock(dstPtr,
+                                srcPtr, (size_t)(srcEnd - srcPtr),
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
-                                cctxPtr->cdict,
-
+                                cctxPtr->cdict,
+                                cctxPtr->prefs.frameInfo.blockChecksumFlag);
+       srcPtr = srcEnd;
    }
 
-   /* preserve dictionary
+   /* preserve dictionary within @tmpBuff whenever necessary */
    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
+       /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
+       assert(blockCompression == LZ4B_COMPRESSED);
        if (compressOptionsPtr->stableSrc) {
-           cctxPtr->tmpIn = cctxPtr->tmpBuff;
+           cctxPtr->tmpIn = cctxPtr->tmpBuff;    /* src is stable : dictionary remains in src across invocations */
        } else {
            int const realDictSize = LZ4F_localSaveDict(cctxPtr);
-
+           assert(0 <= realDictSize && realDictSize <= 64 KB);
            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        }
    }
 
    /* keep tmpIn within limits */
-   if ((cctxPtr->
-
+   if (!(cctxPtr->prefs.autoFlush)  /* no autoflush : there may be some data left within internal buffer */
+     && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) )  /* not enough room to store next block */
    {
+       /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
+        * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+       assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
    }
 
    /* some input data left, necessarily < blockSize */
    if (srcPtr < srcEnd) {
        /* fill tmp buffer */
-       size_t const sizeToCopy = srcEnd - srcPtr;
+       size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
        cctxPtr->tmpInSize = sizeToCopy;
    }
 
    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
-       XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
+       (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
 
    cctxPtr->totalInSize += srcSize;
-   return dstPtr - dstStart;
+   return (size_t)(dstPtr - dstStart);
+}
+
+/*! LZ4F_compressUpdate() :
+ *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ *  When successful, the function always entirely consumes @srcBuffer.
+ *  src data is either buffered or compressed into @dstBuffer.
+ *  If previously an uncompressed block was written, buffered data is flushed
+ *  before appending compressed data is continued.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ *           or an error code if it fails (which can be tested using LZ4F_isError())
+ *  After an error, the state is left in a UB state, and must be re-initialized.
+ */
+size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
+                           void* dstBuffer, size_t dstCapacity,
+                           const void* srcBuffer, size_t srcSize,
+                           const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+    return LZ4F_compressUpdateImpl(cctxPtr,
+                                   dstBuffer, dstCapacity,
+                                   srcBuffer, srcSize,
+                                   compressOptionsPtr, LZ4B_COMPRESSED);
+}
+
+/*! LZ4F_compressUpdate() :
+ *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ *  When successful, the function always entirely consumes @srcBuffer.
+ *  src data is either buffered or compressed into @dstBuffer.
+ *  If previously an uncompressed block was written, buffered data is flushed
+ *  before appending compressed data is continued.
+ *  This is only supported when LZ4F_blockIndependent is used
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ *           or an error code if it fails (which can be tested using LZ4F_isError())
+ *  After an error, the state is left in a UB state, and must be re-initialized.
+ */
+size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
+                               void* dstBuffer, size_t dstCapacity,
+                               const void* srcBuffer, size_t srcSize,
+                               const LZ4F_compressOptions_t* compressOptionsPtr) {
+    RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
+    return LZ4F_compressUpdateImpl(cctxPtr,
+                                   dstBuffer, dstCapacity,
+                                   srcBuffer, srcSize,
+                                   compressOptionsPtr, LZ4B_UNCOMPRESSED);
 }
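Note (not part of the upstream diff): a minimal sketch of how the two update entry points added above are typically driven from application code. It assumes a cctx already opened with LZ4F_compressBegin() and a dst buffer sized with LZ4F_compressBound(); the helper name, its parameters and the storeUncompressed flag are illustrative only.

    /* illustrative sketch : choose between the two update entry points */
    static size_t feed_chunk(LZ4F_cctx* cctx,
                             void* dst, size_t dstCapacity,   /* >= LZ4F_compressBound(srcSize, &prefs) */
                             const void* src, size_t srcSize,
                             int storeUncompressed)           /* hypothetical caller-side flag */
    {
        /* LZ4F_uncompressedUpdate() is only valid for LZ4F_blockIndependent frames */
        size_t const r = storeUncompressed
            ? LZ4F_uncompressedUpdate(cctx, dst, dstCapacity, src, srcSize, NULL)
            : LZ4F_compressUpdate(cctx, dst, dstCapacity, src, srcSize, NULL);
        return r;  /* bytes written into dst (0 means data was only buffered), or an error code (test with LZ4F_isError()) */
    }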
 
 
 /*! LZ4F_flush() :
-*
-*
-* The result of the function is the number of bytes written into dstBuffer
-*
+ *  When compressed data must be sent immediately, without waiting for a block to be filled,
+ *  invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
+ *  The result of the function is the number of bytes written into dstBuffer.
+ *  It can be zero, this means there was no data left within LZ4F_cctx.
  *  The function outputs an error code if it fails (can be tested using LZ4F_isError())
-*
+ *  LZ4F_compressOptions_t* is optional. NULL is a valid argument.
  */
-size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
+size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
+                  void* dstBuffer, size_t dstCapacity,
+                  const LZ4F_compressOptions_t* compressOptionsPtr)
 {
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;
 
    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
-
-
-   (void)compressOptionsPtr;   /* not yet
+   RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
+   RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
+   (void)compressOptionsPtr;   /* not useful (yet) */
 
    /* select compression function */
-   compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+   compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression);
 
    /* compress tmp buffer */
-   dstPtr += LZ4F_makeBlock(dstPtr,
+   dstPtr += LZ4F_makeBlock(dstPtr,
+                            cctxPtr->tmpIn, cctxPtr->tmpInSize,
                             compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
-                            cctxPtr->cdict,
-
+                            cctxPtr->cdict,
+                            cctxPtr->prefs.frameInfo.blockChecksumFlag);
+   assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));
+
+   if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
+       cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;
 
    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {  /* necessarily LZ4F_blockLinked */
-       int realDictSize = LZ4F_localSaveDict(cctxPtr);
+       int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }
 
-   return dstPtr - dstStart;
+   return (size_t)(dstPtr - dstStart);
 }
 
 
 /*! LZ4F_compressEnd() :
-*
-*
-*
-*
-*
-*
-*
+ *  When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
+ *  It will flush whatever data remained within compressionContext (like LZ4_flush())
+ *  but also properly finalize the frame, with an endMark and an (optional) checksum.
+ *  LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
+ * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
+ *          or an error code if it fails (can be tested using LZ4F_isError())
+ *  The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
  */
-size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
+size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
+                        void* dstBuffer, size_t dstCapacity,
+                        const LZ4F_compressOptions_t* compressOptionsPtr)
 {
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
 
-   size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer,
-
+   size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+   DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
+   FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;
 
+   assert(flushSize <= dstCapacity);
+   dstCapacity -= flushSize;
+
+   RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);
-   dstPtr+=4;   /* endMark */
+   dstPtr += 4;   /* endMark */
 
    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
+       RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
+       DEBUGLOG(5,"Writing 32-bit content checksum");
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }
@@ -870,10 +1180,10 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstMaxSize,
 
    if (cctxPtr->prefs.frameInfo.contentSize) {
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
-
+           RETURN_ERROR(frameSize_wrong);
    }
 
-   return dstPtr - dstStart;
+   return (size_t)(dstPtr - dstStart);
 }
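For context (not part of the upstream diff), a compact end-to-end sketch of the compression sequence whose finalization path is patched above. It is an assumption-laden illustration, not code from this package: only the public LZ4F_* symbols are real, all other names and buffer sizes are hypothetical, and error handling is reduced to LZ4F_isError().

    #include <lz4frame.h>
    #include <string.h>

    /* sketch: compress one in-memory buffer into a single LZ4 frame */
    static size_t compress_whole_buffer(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
    {
        LZ4F_preferences_t prefs;
        LZ4F_cctx* cctx = NULL;
        size_t pos = 0, r;

        memset(&prefs, 0, sizeof(prefs));   /* defaults : 64 KB blocks, linked mode, no checksums */
        if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;

        r = LZ4F_compressBegin(cctx, dst, dstCapacity, &prefs);      /* writes the frame header */
        if (!LZ4F_isError(r)) { pos += r;
            r = LZ4F_compressUpdate(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize, NULL);
        }
        if (!LZ4F_isError(r)) { pos += r;
            r = LZ4F_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL);  /* endMark (+ optional checksum) */
        }
        pos = LZ4F_isError(r) ? 0 : pos + r;

        LZ4F_freeCompressionContext(cctx);
        return pos;   /* 0 on error; dstCapacity should be >= LZ4F_compressBound(srcSize, &prefs) */
    }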
 
 
@@ -887,14 +1197,14 @@ typedef enum {
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
-
-   dstage_decodeCBlock_intoTmp, dstage_flushOut,
+   dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,
    dstage_getSFrameSize, dstage_storeSFrameSize,
    dstage_skipSkippable
 } dStage_t;
 
 struct LZ4F_dctx_s {
+   LZ4F_CustomMem cmem;
    LZ4F_frameInfo_t frameInfo;
    U32    version;
    dStage_t dStage;
@@ -912,23 +1222,37 @@ struct LZ4F_dctx_s {
    size_t tmpOutStart;
    XXH32_state_t xxh;
    XXH32_state_t blockChecksum;
+   int    skipChecksum;
    BYTE   header[LZ4F_HEADER_SIZE_MAX];
 };  /* typedef'd to LZ4F_dctx in lz4frame.h */
 
 
+LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
+{
+   LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
+   if (dctx == NULL) return NULL;
+
+   dctx->cmem = customMem;
+   dctx->version = version;
+   return dctx;
+}
+
 /*! LZ4F_createDecompressionContext() :
  *  Create a decompressionContext object, which will track all decompression operations.
  *  Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
  *  Object can later be released using LZ4F_freeDecompressionContext().
  * @return : if != 0, there was an error during context creation.
  */
-LZ4F_errorCode_t
+LZ4F_errorCode_t
+LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
 {
-
-
+   assert(LZ4F_decompressionContextPtr != NULL);  /* violation of narrow contract */
+   RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null);  /* in case it nonetheless happen in production */
 
-
-   *LZ4F_decompressionContextPtr
+   *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
+   if (*LZ4F_decompressionContextPtr == NULL) {  /* failed allocation */
+       RETURN_ERROR(allocation_failed);
+   }
    return LZ4F_OK_NoError;
 }
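Note (not part of the upstream diff): the new _advanced constructor above is the hook for custom allocators. A hedged sketch follows; the LZ4F_CustomMem field names and layout are taken from the LZ4F_STATIC_LINKING_ONLY section of the bundled lz4frame.h and should be verified against that header, and the malloc/free wrappers are illustrative only.

    #include <stdlib.h>

    static void* my_alloc(void* opaqueState, size_t size)   { (void)opaqueState; return malloc(size); }
    static void  my_free (void* opaqueState, void* address) { (void)opaqueState; free(address); }

    /* assumed field order: customAlloc, customCalloc, customFree, opaqueState;
     * a NULL customCalloc is expected to fall back to customAlloc + memset */
    static LZ4F_CustomMem const cmem = { my_alloc, NULL, my_free, NULL };

    /* route all dctx allocations through the user allocator */
    LZ4F_dctx* const dctx = LZ4F_createDecompressionContext_advanced(cmem, LZ4F_VERSION);
    /* ... use dctx as usual, then LZ4F_freeDecompressionContext(dctx); */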
@@ -937,9 +1261,9 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
    LZ4F_errorCode_t result = LZ4F_OK_NoError;
    if (dctx != NULL) {   /* can accept NULL input, like free() */
        result = (LZ4F_errorCode_t)dctx->dStage;
-
-
-
+       LZ4F_free(dctx->tmpIn, dctx->cmem);
+       LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+       LZ4F_free(dctx, dctx->cmem);
    }
    return result;
 }
@@ -952,31 +1276,7 @@ void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
    dctx->dStage = dstage_getFrameHeader;
    dctx->dict = NULL;
    dctx->dictSize = 0;
-
-
-
-/*! LZ4F_headerSize() :
- * @return : size of frame header
- *           or an error code, which can be tested using LZ4F_isError()
- */
-static size_t LZ4F_headerSize(const void* src, size_t srcSize)
-{
-   /* minimal srcSize to determine header size */
-   if (srcSize < 5) return err0r(LZ4F_ERROR_frameHeader_incomplete);
-
-   /* special case : skippable frames */
-   if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) return 8;
-
-   /* control magic number */
-   if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
-       return err0r(LZ4F_ERROR_frameType_unknown);
-
-   /* Frame Header Size */
-   { BYTE const FLG = ((const BYTE*)src)[4];
-     U32 const contentSizeFlag = (FLG>>3) & _1BIT;
-     U32 const dictIDFlag = FLG & _1BIT;
-     return minFHSize + (contentSizeFlag*8) + (dictIDFlag*4);
-   }
+   dctx->skipChecksum = 0;
 }
 
 
@@ -994,9 +1294,10 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;
 
+   DEBUGLOG(5, "LZ4F_decodeHeader");
    /* need to decode header to get frameInfo */
-
-
+   RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
+   MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
 
    /* special case : skippable frames */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
@@ -1009,12 +1310,15 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
        } else {
            dctx->dStage = dstage_getSFrameSize;
            return 4;
-
-       }
+   }   }
 
    /* control magic number */
-
-
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+   if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
+       DEBUGLOG(4, "frame header error : unknown magic number");
+       RETURN_ERROR(frameType_unknown);
+   }
+#endif
    dctx->frameInfo.frameType = LZ4F_frame;
 
    /* Flags */
@@ -1026,12 +1330,12 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
-       if (((FLG>>1)&_1BIT) != 0)
-       if (version != 1)
+       if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);    /* Reserved bit */
+       if (version != 1) RETURN_ERROR(headerVersion_wrong);          /* Version Number, only supported value */
    }
 
    /* Frame Header Size */
-   frameHeaderSize = minFHSize + (contentSizeFlag
+   frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
 
    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header */
@@ -1046,26 +1350,27 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
-       if (((BD>>7)&_1BIT) != 0)
-       if (blockSizeID < 4)
-       if (((BD>>0)&_4BITS) != 0)
+       if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);     /* Reserved bit */
+       if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);      /* 4-7 only supported values for the time being */
+       if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);    /* Reserved bits */
    }
 
    /* check header */
+   assert(frameHeaderSize > 5);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
-
-       return err0r(LZ4F_ERROR_headerChecksum_invalid);
+       RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
+#endif
 
    /* save */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
-   dctx->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
+   dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag)
-       dctx->frameRemainingSize =
-           dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
+       dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    if (dictIDFlag)
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
 
@@ -1075,6 +1380,36 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
 }
 
 
+/*! LZ4F_headerSize() :
+ * @return : size of frame header
+ *           or an error code, which can be tested using LZ4F_isError()
+ */
+size_t LZ4F_headerSize(const void* src, size_t srcSize)
+{
+   RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
+
+   /* minimal srcSize to determine header size */
+   if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
+       RETURN_ERROR(frameHeader_incomplete);
+
+   /* special case : skippable frames */
+   if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
+       return 8;
+
+   /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+   if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
+       RETURN_ERROR(frameType_unknown);
+#endif
+
+   /* Frame Header Size */
+   {   BYTE const FLG = ((const BYTE*)src)[4];
+       U32 const contentSizeFlag = (FLG>>3) & _1BIT;
+       U32 const dictIDFlag = FLG & _1BIT;
+       return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+   }
+}
+
 /*! LZ4F_getFrameInfo() :
  *  This function extracts frame parameters (max blockSize, frame checksum, etc.).
  *  Usage is optional. Objective is to provide relevant information for allocation purposes.
@@ -1090,10 +1425,12 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
  * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
  * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
  */
-LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
-
+LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+                                   LZ4F_frameInfo_t* frameInfoPtr,
+                                   const void* srcBuffer, size_t* srcSizePtr)
 {
-
+   LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
+   if (dctx->dStage > dstage_storeFrameHeader) {
        /* frameInfo already decoded */
        size_t o=0, i=0;
        *srcSizePtr = 0;
@@ -1104,55 +1441,66 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, LZ4F_frameInfo_t* frameInfoP
        if (dctx->dStage == dstage_storeFrameHeader) {
            /* frame decoding already started, in the middle of header => automatic fail */
            *srcSizePtr = 0;
-
+           RETURN_ERROR(frameDecoding_alreadyStarted);
        } else {
-           size_t decodeResult;
            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
            if (*srcSizePtr < hSize) {
                *srcSizePtr=0;
-
+               RETURN_ERROR(frameHeader_incomplete);
            }
 
-           decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
-
-
-
-
-
-
-
-
-   }   }
+           {   size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
+               if (LZ4F_isError(decodeResult)) {
+                   *srcSizePtr = 0;
+               } else {
+                   *srcSizePtr = decodeResult;
+                   decodeResult = BHSize;   /* block header size */
+               }
+               *frameInfoPtr = dctx->frameInfo;
+               return decodeResult;
+   }   }   }
 }
 
 
 /* LZ4F_updateDict() :
- * only used for LZ4F_blockLinked mode
-
+ * only used for LZ4F_blockLinked mode
+ * Condition : @dstPtr != NULL
+ */
+static void LZ4F_updateDict(LZ4F_dctx* dctx,
+                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
+                      unsigned withinTmp)
 {
-
-
+   assert(dstPtr != NULL);
+   if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
+   assert(dctx->dict != NULL);
 
-   if (dctx->dict + dctx->dictSize == dstPtr) {  /*
+   if (dctx->dict + dctx->dictSize == dstPtr) {  /* prefix mode, everything within dstBuffer */
        dctx->dictSize += dstSize;
        return;
    }
 
-
-
-   dctx->
+   assert(dstPtr >= dstBufferStart);
+   if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {  /* history in dstBuffer becomes large enough to become dictionary */
+       dctx->dict = (const BYTE*)dstBufferStart;
+       dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
       return;
    }
 
-
-
+   assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
+
+   /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+   assert(dctx->tmpOutBuffer != NULL);
+
+   if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
+       /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
+       assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }
 
    if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
-       size_t const preserveSize = dctx->tmpOut - dctx->tmpOutBuffer;
+       size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        if (dctx->tmpOutSize > 64 KB) copySize = 0;
@@ -1167,7 +1515,7 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize,
 
    if (dctx->dict == dctx->tmpOutBuffer) {  /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {  /* tmp buffer not large enough */
-           size_t const preserveSize = 64 KB - dstSize;
+           size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
@@ -1177,7 +1525,7 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize,
    }
 
    /* join dict & dest into tmp */
-   {   size_t preserveSize = 64 KB - dstSize;
+   {   size_t preserveSize = 64 KB - dstSize;
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
@@ -1187,7 +1535,6 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize,
 }
 
 
-
 /*! LZ4F_decompress() :
  *  Call this function repetitively to regenerate compressed data in srcBuffer.
  *  The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
@@ -1216,17 +1563,22 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
    const BYTE* const srcEnd = srcStart + *srcSizePtr;
    const BYTE* srcPtr = srcStart;
    BYTE* const dstStart = (BYTE*)dstBuffer;
-   BYTE* const dstEnd = dstStart + *dstSizePtr;
+   BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
    BYTE* dstPtr = dstStart;
    const BYTE* selectedIn = NULL;
    unsigned doAnotherStage = 1;
    size_t nextSrcSizeHint = 1;
 
 
-
+   DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+            srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
+   if (dstBuffer == NULL) assert(*dstSizePtr == 0);
+   MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
    if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
    *srcSizePtr = 0;
    *dstSizePtr = 0;
+   assert(dctx != NULL);
+   dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0);  /* once set, disable for the remainder of the frame */
 
    /* behaves as a state machine */
 
@@ -1236,19 +1588,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
        {
 
        case dstage_getFrameHeader:
+           DEBUGLOG(6, "dstage_getFrameHeader");
            if ((size_t)(srcEnd-srcPtr) >= maxFHSize) {  /* enough to decode - shortcut */
-               size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, srcEnd-srcPtr);  /* will update dStage appropriately */
-
+               size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr));  /* will update dStage appropriately */
+               FORWARD_IF_ERROR(hSize);
                srcPtr += hSize;
                break;
            }
            dctx->tmpInSize = 0;
            if (srcEnd-srcPtr == 0) return minFHSize;  /* 0-size input */
-           dctx->tmpInTarget = minFHSize;  /* minimum to
+           dctx->tmpInTarget = minFHSize;  /* minimum size to decode header */
            dctx->dStage = dstage_storeFrameHeader;
            /* fall-through */
 
        case dstage_storeFrameHeader:
+           DEBUGLOG(6, "dstage_storeFrameHeader");
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
@@ -1259,26 +1613,23 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                doAnotherStage = 0;   /* not enough src data, ask for some more */
                break;
            }
-
-           if (LZ4F_isError(hSize)) return hSize;
-           }
+           FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) );  /* will update dStage appropriately */
            break;
 
        case dstage_init:
-
+           DEBUGLOG(6, "dstage_init");
+           if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
            /* internal buffers allocation */
            {   size_t const bufferNeeded = dctx->maxBlockSize
-                   + ((dctx->frameInfo.blockMode==LZ4F_blockLinked)
+                   + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
                if (bufferNeeded > dctx->maxBufferSize) {   /* tmp buffers too small */
                    dctx->maxBufferSize = 0;   /* ensure allocation will be re-attempted on next entry*/
-
-                   dctx->tmpIn = (BYTE*)
-
-
-
-                   dctx->tmpOutBuffer
-                   if (dctx->tmpOutBuffer== NULL)
-                       return err0r(LZ4F_ERROR_allocation_failed);
+                   LZ4F_free(dctx->tmpIn, dctx->cmem);
+                   dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
+                   RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
+                   LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+                   dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
+                   RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed);
                    dctx->maxBufferSize = bufferNeeded;
            }   }
            dctx->tmpInSize = 0;
@@ -1318,19 +1669,23 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
            }   /* if (dctx->dStage == dstage_storeBlockHeader) */
 
            /* decode block header */
-           {
-               size_t const
-
+           {   U32 const blockHeader = LZ4F_readLE32(selectedIn);
+               size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
+               size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
+               if (blockHeader==0) {  /* frameEnd signal, no more block */
+                   DEBUGLOG(5, "end of frame");
                    dctx->dStage = dstage_getSuffix;
                    break;
                }
-               if (nextCBlockSize > dctx->maxBlockSize)
-
-
+               if (nextCBlockSize > dctx->maxBlockSize) {
+                   RETURN_ERROR(maxBlockSize_invalid);
+               }
+               if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
                    /* next block is uncompressed */
                    dctx->tmpInTarget = nextCBlockSize;
+                   DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
                    if (dctx->frameInfo.blockChecksumFlag) {
-                       XXH32_reset(&dctx->blockChecksum, 0);
+                       (void)XXH32_reset(&dctx->blockChecksum, 0);
                    }
                    dctx->dStage = dstage_copyDirect;
                    break;
@@ -1338,28 +1693,36 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                /* next block is a compressed block */
                dctx->tmpInTarget = nextCBlockSize + crcSize;
                dctx->dStage = dstage_getCBlock;
-               if (dstPtr==dstEnd) {
-                   nextSrcSizeHint =
+               if (dstPtr==dstEnd || srcPtr==srcEnd) {
+                   nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
                    doAnotherStage = 0;
                }
                break;
            }
 
        case dstage_copyDirect:   /* uncompressed block */
-
-
-
-
-
-
-
-
-
-
+           DEBUGLOG(6, "dstage_copyDirect");
+           {   size_t sizeToCopy;
+               if (dstPtr == NULL) {
+                   sizeToCopy = 0;
+               } else {
+                   size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
+                   sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
+                   memcpy(dstPtr, srcPtr, sizeToCopy);
+                   if (!dctx->skipChecksum) {
+                       if (dctx->frameInfo.blockChecksumFlag) {
+                           (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+                       }
+                       if (dctx->frameInfo.contentChecksumFlag)
+                           (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
+                   }
+                   if (dctx->frameInfo.contentSize)
+                       dctx->frameRemainingSize -= sizeToCopy;
 
-
-
-
+                   /* history management (linked blocks only)*/
+                   if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+                       LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+               }   }
 
                srcPtr += sizeToCopy;
                dstPtr += sizeToCopy;
@@ -1372,15 +1735,16 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                    break;
                }
                dctx->tmpInTarget -= sizeToCopy;  /* need to copy more */
-               nextSrcSizeHint = dctx->tmpInTarget +
-                               + dctx->frameInfo.contentChecksumFlag * 4 /* block checksum */
-                               + BHSize /* next header size */;
-               doAnotherStage = 0;
-               break;
            }
+           nextSrcSizeHint = dctx->tmpInTarget +
+                           +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+                           + BHSize /* next header size */;
+           doAnotherStage = 0;
+           break;
 
        /* check block checksum for recently transferred uncompressed block */
        case dstage_getBlockChecksum:
+           DEBUGLOG(6, "dstage_getBlockChecksum");
            {   const void* crcSrc;
                if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
                    crcSrc = srcPtr;
@@ -1397,16 +1761,26 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                    }
                    crcSrc = dctx->header;
                }
-
+               if (!dctx->skipChecksum) {
+                   U32 const readCRC = LZ4F_readLE32(crcSrc);
                    U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
-
-
-
-
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+                   DEBUGLOG(6, "compare block checksum");
+                   if (readCRC != calcCRC) {
+                       DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
+                               readCRC, calcCRC);
+                       RETURN_ERROR(blockChecksum_invalid);
+                   }
+#else
+                   (void)readCRC;
+                   (void)calcCRC;
+#endif
+           }   }
            dctx->dStage = dstage_getBlockHeader;  /* new block */
            break;
 
        case dstage_getCBlock:
+           DEBUGLOG(6, "dstage_getCBlock");
            if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeCBlock;
@@ -1415,9 +1789,8 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
            /* input large enough to read full block directly */
            selectedIn = srcPtr;
            srcPtr += dctx->tmpInTarget;
-           dctx->dStage = dstage_decodeCBlock;
-           break;
 
+           if (0)  /* always jump over next block */
        case dstage_storeCBlock:
            {   size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
                size_t const inputLeft = (size_t)(srcEnd-srcPtr);
@@ -1426,106 +1799,132 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
-                   nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
-
+                   nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
+                                   + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+                                   + BHSize /* next header size */;
+                   doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
-               dctx->dStage = dstage_decodeCBlock;
            }
-           /* fall-through */
 
-
-
+           /* At this stage, input is large enough to decode a block */
+
+           /* First, decode and control block checksum if it exists */
            if (dctx->frameInfo.blockChecksumFlag) {
+               assert(dctx->tmpInTarget >= 4);
                dctx->tmpInTarget -= 4;
+               assert(selectedIn != NULL);  /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
                {   U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
                    U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
-
-
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+                   RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
+#else
+                   (void)readBlockCrc;
+                   (void)calcBlockCrc;
+#endif
            }   }
-           if ((size_t)(dstEnd-dstPtr) < dctx->maxBlockSize)  /* not enough place into dst : decode into tmpOut */
-               dctx->dStage = dstage_decodeCBlock_intoTmp;
-           else
-               dctx->dStage = dstage_decodeCBlock_intoDst;
-           break;
 
-
-
+           /* decode directly into destination buffer if there is enough room */
+           if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
+               /* unless the dictionary is stored in tmpOut:
+                * in which case it's faster to decode within tmpOut
+                * to benefit from prefix speedup */
+             && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
+           {
+               const char* dict = (const char*)dctx->dict;
+               size_t dictSize = dctx->dictSize;
+               int decodedSize;
+               assert(dstPtr != NULL);
+               if (dict && dictSize > 1 GB) {
+                   /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
+                   dict += dictSize - 64 KB;
+                   dictSize = 64 KB;
+               }
+               decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dstPtr,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
-
-
-               if (dctx->frameInfo.contentChecksumFlag)
-                   XXH32_update(&(dctx->xxh), dstPtr, decodedSize);
+                       dict, (int)dictSize);
+               RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+               if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
+                   XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
-                   dctx->frameRemainingSize -= decodedSize;
+                   dctx->frameRemainingSize -= (size_t)decodedSize;
 
                /* dictionary management */
-               if (dctx->frameInfo.blockMode==LZ4F_blockLinked)
-                   LZ4F_updateDict(dctx, dstPtr, decodedSize, dstStart, 0);
+               if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
+                   LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
+               }
 
                dstPtr += decodedSize;
-               dctx->dStage = dstage_getBlockHeader;
+               dctx->dStage = dstage_getBlockHeader;  /* end of block, let's get another one */
                break;
            }
 
-       case dstage_decodeCBlock_intoTmp:
            /* not enough place into dst : decode into tmpOut */
-
+
+           /* manage dictionary */
            if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                if (dctx->dict == dctx->tmpOutBuffer) {
+                   /* truncate dictionary to 64 KB if too big */
                    if (dctx->dictSize > 128 KB) {
                        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
                        dctx->dictSize = 64 KB;
                    }
                    dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
-               } else {  /* dict not within
+               } else {  /* dict not within tmpOut */
                    size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
                    dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
-
-               }
+           }   }
 
-           /* Decode block */
-           {
+           /* Decode block into tmpOut */
+           {   const char* dict = (const char*)dctx->dict;
+               size_t dictSize = dctx->dictSize;
+               int decodedSize;
+               if (dict && dictSize > 1 GB) {
+                   /* the dictSize param is an int, avoid truncation / sign issues */
+                   dict += dictSize - 64 KB;
+                   dictSize = 64 KB;
+               }
+               decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dctx->tmpOut,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
-
-
-
-
-                   XXH32_update(&(dctx->xxh), dctx->tmpOut, decodedSize);
+                       dict, (int)dictSize);
+               RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+               if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
+                   XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
-                   dctx->frameRemainingSize -= decodedSize;
-               dctx->tmpOutSize = decodedSize;
+                   dctx->frameRemainingSize -= (size_t)decodedSize;
+               dctx->tmpOutSize = (size_t)decodedSize;
                dctx->tmpOutStart = 0;
                dctx->dStage = dstage_flushOut;
            }
           /* fall-through */

        case dstage_flushOut:  /* flush decoded data from tmpOut to dstBuffer */
-
+           DEBUGLOG(6, "dstage_flushOut");
+           if (dstPtr != NULL) {
+               size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
                memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
 
                /* dictionary management */
-               if (dctx->frameInfo.blockMode==LZ4F_blockLinked)
-                   LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1);
+               if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
+                   LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
 
                dctx->tmpOutStart += sizeToCopy;
                dstPtr += sizeToCopy;
-
-
-
-                   break;
-               }
-               nextSrcSizeHint = BHSize;
-               doAnotherStage = 0;   /* still some data to flush */
+           }
+           if (dctx->tmpOutStart == dctx->tmpOutSize) {  /* all flushed */
+               dctx->dStage = dstage_getBlockHeader;  /* get next block */
                break;
            }
+           /* could not flush everything : stop there, just request a block header */
+           doAnotherStage = 0;
+           nextSrcSizeHint = BHSize;
+           break;
 
        case dstage_getSuffix:
-
-               return err0r(LZ4F_ERROR_frameSize_wrong);  /* incorrect frame size decoded */
+           RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong);   /* incorrect frame size decoded */
            if (!dctx->frameInfo.contentChecksumFlag) {  /* no checksum, frame is completed */
                nextSrcSizeHint = 0;
                LZ4F_resetDecompressionContext(dctx);
@@ -1556,16 +1955,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeSuffix) */
 
-       /* case dstage_checkSuffix: */   /* no direct
-
+       /* case dstage_checkSuffix: */   /* no direct entry, avoid initialization risks */
+           if (!dctx->skipChecksum) {
+               U32 const readCRC = LZ4F_readLE32(selectedIn);
                U32 const resultCRC = XXH32_digest(&(dctx->xxh));
-
-
-
-
-
-
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+               RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
+#else
+               (void)readCRC;
+               (void)resultCRC;
+#endif
            }
+           nextSrcSizeHint = 0;
+           LZ4F_resetDecompressionContext(dctx);
+           doAnotherStage = 0;
+           break;
 
        case dstage_getSFrameSize:
            if ((srcEnd - srcPtr) >= 4) {
@@ -1580,8 +1984,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
 
            if (dctx->dStage == dstage_storeSFrameSize)
        case dstage_storeSFrameSize:
-           {
-               size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
+           {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
                                              (size_t)(srcEnd - srcPtr) );
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
@@ -1595,7 +1998,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
                selectedIn = dctx->header + 4;
            }   /* if (dctx->dStage == dstage_storeSFrameSize) */
 
-       /* case dstage_decodeSFrameSize: */   /* no direct
+       /* case dstage_decodeSFrameSize: */   /* no direct entry */
            {   size_t const SFrameSize = LZ4F_readLE32(selectedIn);
                dctx->frameInfo.contentSize = SFrameSize;
                dctx->tmpInTarget = SFrameSize;
@@ -1614,25 +2017,26 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
            LZ4F_resetDecompressionContext(dctx);
            break;
        }
-       }
+       }   /* switch (dctx->dStage) */
    }   /* while (doAnotherStage) */
 
-   /* preserve history within
+   /* preserve history within tmpOut whenever necessary */
    LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
    if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked)  /* next block will use up to 64KB from previous ones */
      && (dctx->dict != dctx->tmpOutBuffer)             /* dictionary is not already within tmp */
+     && (dctx->dict != NULL)                           /* dictionary exists */
      && (!decompressOptionsPtr->stableDst)             /* cannot rely on dst data to remain there for next call */
      && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) )  /* valid stages : [init ... getSuffix[ */
    {
        if (dctx->dStage == dstage_flushOut) {
-           size_t const preserveSize = dctx->tmpOut - dctx->tmpOutBuffer;
+           size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
            size_t copySize = 64 KB - dctx->tmpOutSize;
            const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
            if (dctx->tmpOutSize > 64 KB) copySize = 0;
            if (copySize > preserveSize) copySize = preserveSize;
+           assert(dctx->tmpOutBuffer != NULL);
 
-
-           memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+           memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
 
            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = preserveSize + dctx->tmpOutStart;
@@ -1640,8 +2044,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
            const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
            size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
 
-
-           memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
+           memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
 
            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = newDictSize;
@@ -1649,8 +2052,8 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
        }
    }
 
-   *srcSizePtr = (srcPtr - srcStart);
-   *dstSizePtr = (dstPtr - dstStart);
+   *srcSizePtr = (size_t)(srcPtr - srcStart);
+   *dstSizePtr = (size_t)(dstPtr - dstStart);
    return nextSrcSizeHint;
 }
 
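To close the section (not part of the upstream diff): a hedged sketch of the streaming loop that the reworked LZ4F_decompress() above serves. Only the public LZ4F_* calls are real; the file handles, buffer sizes and function name are illustrative, and error handling is kept minimal.

    #include <lz4frame.h>
    #include <stdio.h>

    /* sketch: decode one LZ4 frame from fin to fout; IN_CHUNK/OUT_CHUNK are arbitrary sizes */
    static int decompress_stream(FILE* fin, FILE* fout)
    {
        enum { IN_CHUNK = 64 * 1024, OUT_CHUNK = 256 * 1024 };
        static char in[IN_CHUNK], out[OUT_CHUNK];
        LZ4F_dctx* dctx = NULL;
        size_t hint = 1;   /* LZ4F_decompress() returns a size hint for the next read, 0 when the frame is done */
        int ok = 1;

        if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 0;

        while (hint != 0) {
            size_t const readSize = fread(in, 1, sizeof(in), fin);
            const char* srcPtr = in;
            const char* const srcEnd = in + readSize;
            if (readSize == 0) break;   /* input is truncated if hint is still != 0 here */

            while (srcPtr < srcEnd && hint != 0) {
                size_t dstSize = sizeof(out);
                size_t srcSize = (size_t)(srcEnd - srcPtr);
                hint = LZ4F_decompress(dctx, out, &dstSize, srcPtr, &srcSize, NULL);
                if (LZ4F_isError(hint)) { ok = 0; hint = 0; break; }
                fwrite(out, 1, dstSize, fout);   /* dstSize = bytes produced this call */
                srcPtr += srcSize;               /* srcSize = bytes consumed this call */
            }
        }
        LZ4F_freeDecompressionContext(dctx);
        return ok;
    }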