zstd 1.1.2.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +15 -0
- data/.travis.yml +5 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +64 -0
- data/Rakefile +19 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/exe/zstd +3 -0
- data/ext/zstd/extconf.rb +20 -0
- data/ext/zstd/libzstd/.gitignore +2 -0
- data/ext/zstd/libzstd/LICENSE +1262 -0
- data/ext/zstd/libzstd/Makefile +133 -0
- data/ext/zstd/libzstd/PATENTS +1272 -0
- data/ext/zstd/libzstd/README.md +77 -0
- data/ext/zstd/libzstd/common/bitstream.h +414 -0
- data/ext/zstd/libzstd/common/entropy_common.c +227 -0
- data/ext/zstd/libzstd/common/error_private.c +43 -0
- data/ext/zstd/libzstd/common/error_private.h +76 -0
- data/ext/zstd/libzstd/common/fse.h +668 -0
- data/ext/zstd/libzstd/common/fse_decompress.c +329 -0
- data/ext/zstd/libzstd/common/huf.h +238 -0
- data/ext/zstd/libzstd/common/mem.h +372 -0
- data/ext/zstd/libzstd/common/xxhash.c +867 -0
- data/ext/zstd/libzstd/common/xxhash.h +309 -0
- data/ext/zstd/libzstd/common/zstd_common.c +77 -0
- data/ext/zstd/libzstd/common/zstd_errors.h +60 -0
- data/ext/zstd/libzstd/common/zstd_internal.h +270 -0
- data/ext/zstd/libzstd/compress/fse_compress.c +850 -0
- data/ext/zstd/libzstd/compress/huf_compress.c +609 -0
- data/ext/zstd/libzstd/compress/zstd_compress.c +3291 -0
- data/ext/zstd/libzstd/compress/zstd_opt.h +919 -0
- data/ext/zstd/libzstd/decompress/huf_decompress.c +885 -0
- data/ext/zstd/libzstd/decompress/zstd_decompress.c +2154 -0
- data/ext/zstd/libzstd/deprecated/zbuff.h +210 -0
- data/ext/zstd/libzstd/deprecated/zbuff_compress.c +145 -0
- data/ext/zstd/libzstd/deprecated/zbuff_decompress.c +74 -0
- data/ext/zstd/libzstd/dictBuilder/divsufsort.c +1913 -0
- data/ext/zstd/libzstd/dictBuilder/divsufsort.h +67 -0
- data/ext/zstd/libzstd/dictBuilder/zdict.c +1012 -0
- data/ext/zstd/libzstd/dictBuilder/zdict.h +111 -0
- data/ext/zstd/libzstd/dll/example/Makefile +47 -0
- data/ext/zstd/libzstd/dll/example/README.md +69 -0
- data/ext/zstd/libzstd/dll/example/build_package.bat +17 -0
- data/ext/zstd/libzstd/dll/example/fullbench-dll.sln +25 -0
- data/ext/zstd/libzstd/dll/example/fullbench-dll.vcxproj +179 -0
- data/ext/zstd/libzstd/dll/libzstd.def +86 -0
- data/ext/zstd/libzstd/legacy/zstd_legacy.h +259 -0
- data/ext/zstd/libzstd/legacy/zstd_v01.c +2095 -0
- data/ext/zstd/libzstd/legacy/zstd_v01.h +80 -0
- data/ext/zstd/libzstd/legacy/zstd_v02.c +3518 -0
- data/ext/zstd/libzstd/legacy/zstd_v02.h +79 -0
- data/ext/zstd/libzstd/legacy/zstd_v03.c +3159 -0
- data/ext/zstd/libzstd/legacy/zstd_v03.h +79 -0
- data/ext/zstd/libzstd/legacy/zstd_v04.c +3795 -0
- data/ext/zstd/libzstd/legacy/zstd_v04.h +128 -0
- data/ext/zstd/libzstd/legacy/zstd_v05.c +4056 -0
- data/ext/zstd/libzstd/legacy/zstd_v05.h +149 -0
- data/ext/zstd/libzstd/legacy/zstd_v06.c +4167 -0
- data/ext/zstd/libzstd/legacy/zstd_v06.h +159 -0
- data/ext/zstd/libzstd/legacy/zstd_v07.c +4540 -0
- data/ext/zstd/libzstd/legacy/zstd_v07.h +173 -0
- data/ext/zstd/libzstd/libzstd.pc.in +14 -0
- data/ext/zstd/libzstd/zstd.h +673 -0
- data/ext/zstd/zstd.c +185 -0
- data/ext/zstd/zstd.h +7 -0
- data/lib/zstd/version.rb +3 -0
- data/lib/zstd.rb +6 -0
- data/zstd.gemspec +38 -0
- metadata +172 -0
@@ -0,0 +1,2095 @@
|
|
1
|
+
/**
|
2
|
+
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
3
|
+
* All rights reserved.
|
4
|
+
*
|
5
|
+
* This source code is licensed under the BSD-style license found in the
|
6
|
+
* LICENSE file in the root directory of this source tree. An additional grant
|
7
|
+
* of patent rights can be found in the PATENTS file in the same directory.
|
8
|
+
*/
|
9
|
+
|
10
|
+
|
11
|
+
/******************************************
|
12
|
+
* Includes
|
13
|
+
******************************************/
|
14
|
+
#include <stddef.h> /* size_t, ptrdiff_t */
|
15
|
+
#include "zstd_v01.h"
|
16
|
+
#include "error_private.h"
|
17
|
+
|
18
|
+
|
19
|
+
/******************************************
|
20
|
+
* Static allocation
|
21
|
+
******************************************/
|
22
|
+
/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
|
23
|
+
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
|
24
|
+
|
25
|
+
/* You can statically allocate Huff0 DTable as a table of unsigned short using below macro */
|
26
|
+
#define HUF_DTABLE_SIZE_U16(maxTableLog) (1 + (1<<maxTableLog))
|
27
|
+
#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \
|
28
|
+
unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }
|
29
|
+
|
30
|
+
|
31
|
+
/******************************************
|
32
|
+
* Error Management
|
33
|
+
******************************************/
|
34
|
+
#define FSE_LIST_ERRORS(ITEM) \
|
35
|
+
ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \
|
36
|
+
ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \
|
37
|
+
ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\
|
38
|
+
ITEM(FSE_ERROR_corruptionDetected) \
|
39
|
+
ITEM(FSE_ERROR_maxCode)
|
40
|
+
|
41
|
+
#define FSE_GENERATE_ENUM(ENUM) ENUM,
|
42
|
+
typedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
|
43
|
+
|
44
|
+
|
45
|
+
/******************************************
|
46
|
+
* FSE symbol compression API
|
47
|
+
******************************************/
|
48
|
+
/*
|
49
|
+
This API consists of small unitary functions, which highly benefit from being inlined.
|
50
|
+
You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.
|
51
|
+
Visual seems to do it automatically.
|
52
|
+
For gcc or clang, you'll need to add -flto flag at compilation and linking stages.
|
53
|
+
If none of these solutions is applicable, include "fse.c" directly.
|
54
|
+
*/
|
55
|
+
|
56
|
+
typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
|
57
|
+
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
|
58
|
+
|
59
|
+
typedef struct
|
60
|
+
{
|
61
|
+
size_t bitContainer;
|
62
|
+
int bitPos;
|
63
|
+
char* startPtr;
|
64
|
+
char* ptr;
|
65
|
+
char* endPtr;
|
66
|
+
} FSE_CStream_t;
|
67
|
+
|
68
|
+
typedef struct
|
69
|
+
{
|
70
|
+
ptrdiff_t value;
|
71
|
+
const void* stateTable;
|
72
|
+
const void* symbolTT;
|
73
|
+
unsigned stateLog;
|
74
|
+
} FSE_CState_t;
|
75
|
+
|
76
|
+
typedef struct
|
77
|
+
{
|
78
|
+
size_t bitContainer;
|
79
|
+
unsigned bitsConsumed;
|
80
|
+
const char* ptr;
|
81
|
+
const char* start;
|
82
|
+
} FSE_DStream_t;
|
83
|
+
|
84
|
+
typedef struct
|
85
|
+
{
|
86
|
+
size_t state;
|
87
|
+
const void* table; /* precise table may vary, depending on U16 */
|
88
|
+
} FSE_DState_t;
|
89
|
+
|
90
|
+
typedef enum { FSE_DStream_unfinished = 0,
|
91
|
+
FSE_DStream_endOfBuffer = 1,
|
92
|
+
FSE_DStream_completed = 2,
|
93
|
+
FSE_DStream_tooFar = 3 } FSE_DStream_status; /* result of FSE_reloadDStream() */
|
94
|
+
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! */
|
95
|
+
|
96
|
+
|
97
|
+
/****************************************************************
|
98
|
+
* Tuning parameters
|
99
|
+
****************************************************************/
|
100
|
+
/* MEMORY_USAGE :
|
101
|
+
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
|
102
|
+
* Increasing memory usage improves compression ratio
|
103
|
+
* Reduced memory usage can improve speed, due to cache effect
|
104
|
+
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
|
105
|
+
#define FSE_MAX_MEMORY_USAGE 14
|
106
|
+
#define FSE_DEFAULT_MEMORY_USAGE 13
|
107
|
+
|
108
|
+
/* FSE_MAX_SYMBOL_VALUE :
|
109
|
+
* Maximum symbol value authorized.
|
110
|
+
* Required for proper stack allocation */
|
111
|
+
#define FSE_MAX_SYMBOL_VALUE 255
|
112
|
+
|
113
|
+
|
114
|
+
/****************************************************************
|
115
|
+
* template functions type & suffix
|
116
|
+
****************************************************************/
|
117
|
+
#define FSE_FUNCTION_TYPE BYTE
|
118
|
+
#define FSE_FUNCTION_EXTENSION
|
119
|
+
|
120
|
+
|
121
|
+
/****************************************************************
|
122
|
+
* Byte symbol type
|
123
|
+
****************************************************************/
|
124
|
+
typedef struct
|
125
|
+
{
|
126
|
+
unsigned short newState;
|
127
|
+
unsigned char symbol;
|
128
|
+
unsigned char nbBits;
|
129
|
+
} FSE_decode_t; /* size == U32 */
|
130
|
+
|
131
|
+
|
132
|
+
|
133
|
+
/****************************************************************
|
134
|
+
* Compiler specifics
|
135
|
+
****************************************************************/
|
136
|
+
#ifdef _MSC_VER /* Visual Studio */
|
137
|
+
# define FORCE_INLINE static __forceinline
|
138
|
+
# include <intrin.h> /* For Visual 2005 */
|
139
|
+
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
140
|
+
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
|
141
|
+
#else
|
142
|
+
# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
|
143
|
+
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
|
144
|
+
# ifdef __GNUC__
|
145
|
+
# define FORCE_INLINE static inline __attribute__((always_inline))
|
146
|
+
# else
|
147
|
+
# define FORCE_INLINE static inline
|
148
|
+
# endif
|
149
|
+
# else
|
150
|
+
# define FORCE_INLINE static
|
151
|
+
# endif /* __STDC_VERSION__ */
|
152
|
+
#endif
|
153
|
+
|
154
|
+
|
155
|
+
/****************************************************************
|
156
|
+
* Includes
|
157
|
+
****************************************************************/
|
158
|
+
#include <stdlib.h> /* malloc, free, qsort */
|
159
|
+
#include <string.h> /* memcpy, memset */
|
160
|
+
#include <stdio.h> /* printf (debug) */
|
161
|
+
|
162
|
+
|
163
|
+
#ifndef MEM_ACCESS_MODULE
|
164
|
+
#define MEM_ACCESS_MODULE
|
165
|
+
/****************************************************************
|
166
|
+
* Basic Types
|
167
|
+
*****************************************************************/
|
168
|
+
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
|
169
|
+
# include <stdint.h>
|
170
|
+
typedef uint8_t BYTE;
|
171
|
+
typedef uint16_t U16;
|
172
|
+
typedef int16_t S16;
|
173
|
+
typedef uint32_t U32;
|
174
|
+
typedef int32_t S32;
|
175
|
+
typedef uint64_t U64;
|
176
|
+
typedef int64_t S64;
|
177
|
+
#else
|
178
|
+
typedef unsigned char BYTE;
|
179
|
+
typedef unsigned short U16;
|
180
|
+
typedef signed short S16;
|
181
|
+
typedef unsigned int U32;
|
182
|
+
typedef signed int S32;
|
183
|
+
typedef unsigned long long U64;
|
184
|
+
typedef signed long long S64;
|
185
|
+
#endif
|
186
|
+
|
187
|
+
#endif /* MEM_ACCESS_MODULE */
|
188
|
+
|
189
|
+
/****************************************************************
|
190
|
+
* Memory I/O
|
191
|
+
*****************************************************************/
|
192
|
+
/* FSE_FORCE_MEMORY_ACCESS
|
193
|
+
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
|
194
|
+
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
|
195
|
+
* The below switch allow to select different access method for improved performance.
|
196
|
+
* Method 0 (default) : use `memcpy()`. Safe and portable.
|
197
|
+
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
|
198
|
+
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
|
199
|
+
* Method 2 : direct access. This method is portable but violate C standard.
|
200
|
+
* It can generate buggy code on targets generating assembly depending on alignment.
|
201
|
+
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
|
202
|
+
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
|
203
|
+
* Prefer these methods in priority order (0 > 1 > 2)
|
204
|
+
*/
|
205
|
+
#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
|
206
|
+
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
|
207
|
+
# define FSE_FORCE_MEMORY_ACCESS 2
|
208
|
+
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
|
209
|
+
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
|
210
|
+
# define FSE_FORCE_MEMORY_ACCESS 1
|
211
|
+
# endif
|
212
|
+
#endif
|
213
|
+
|
214
|
+
|
215
|
+
/* Returns 1 when compiled for a 32-bit target (pointer width == 4 bytes), 0 otherwise. */
static unsigned FSE_32bits(void)
{
    return (sizeof(void*) == 4) ? 1 : 0;
}
|
219
|
+
|
220
|
+
static unsigned FSE_isLittleEndian(void)
|
221
|
+
{
|
222
|
+
const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
|
223
|
+
return one.c[0];
|
224
|
+
}
|
225
|
+
|
226
|
+
#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)

/* Method 2 : direct cast. Fastest on targets that tolerate unaligned loads,
 * but violates strict aliasing/alignment rules of the C standard;
 * only selected for CPUs known to handle it (see switch above). */
static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)

/* Method 1 : packed-union access.
 * __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* Method 0 (default) : portable unaligned reads through memcpy();
 * modern compilers turn these into single load instructions where possible. */
static U16 FSE_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 FSE_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U64 FSE_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

#endif /* FSE_FORCE_MEMORY_ACCESS */
|
260
|
+
|
261
|
+
static U16 FSE_readLE16(const void* memPtr)
|
262
|
+
{
|
263
|
+
if (FSE_isLittleEndian())
|
264
|
+
return FSE_read16(memPtr);
|
265
|
+
else
|
266
|
+
{
|
267
|
+
const BYTE* p = (const BYTE*)memPtr;
|
268
|
+
return (U16)(p[0] + (p[1]<<8));
|
269
|
+
}
|
270
|
+
}
|
271
|
+
|
272
|
+
static U32 FSE_readLE32(const void* memPtr)
|
273
|
+
{
|
274
|
+
if (FSE_isLittleEndian())
|
275
|
+
return FSE_read32(memPtr);
|
276
|
+
else
|
277
|
+
{
|
278
|
+
const BYTE* p = (const BYTE*)memPtr;
|
279
|
+
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
|
280
|
+
}
|
281
|
+
}
|
282
|
+
|
283
|
+
|
284
|
+
static U64 FSE_readLE64(const void* memPtr)
|
285
|
+
{
|
286
|
+
if (FSE_isLittleEndian())
|
287
|
+
return FSE_read64(memPtr);
|
288
|
+
else
|
289
|
+
{
|
290
|
+
const BYTE* p = (const BYTE*)memPtr;
|
291
|
+
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
|
292
|
+
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
|
293
|
+
}
|
294
|
+
}
|
295
|
+
|
296
|
+
/* Reads a little-endian value the width of size_t (32 or 64 bits depending on target). */
static size_t FSE_readLEST(const void* memPtr)
{
    return FSE_32bits() ? (size_t)FSE_readLE32(memPtr)
                        : (size_t)FSE_readLE64(memPtr);
}
|
303
|
+
|
304
|
+
|
305
|
+
|
306
|
+
/****************************************************************
|
307
|
+
* Constants
|
308
|
+
*****************************************************************/
|
309
|
+
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
|
310
|
+
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
|
311
|
+
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
|
312
|
+
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
|
313
|
+
#define FSE_MIN_TABLELOG 5
|
314
|
+
|
315
|
+
#define FSE_TABLELOG_ABSOLUTE_MAX 15
|
316
|
+
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
|
317
|
+
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
|
318
|
+
#endif
|
319
|
+
|
320
|
+
|
321
|
+
/****************************************************************
|
322
|
+
* Error Management
|
323
|
+
****************************************************************/
|
324
|
+
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
|
325
|
+
|
326
|
+
|
327
|
+
/****************************************************************
|
328
|
+
* Complex types
|
329
|
+
****************************************************************/
|
330
|
+
typedef struct
|
331
|
+
{
|
332
|
+
int deltaFindState;
|
333
|
+
U32 deltaNbBits;
|
334
|
+
} FSE_symbolCompressionTransform; /* total 8 bytes */
|
335
|
+
|
336
|
+
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
|
337
|
+
|
338
|
+
/****************************************************************
|
339
|
+
* Internal functions
|
340
|
+
****************************************************************/
|
341
|
+
/* Returns the position of the highest set bit of val (== floor(log2(val))).
 * val must be non-zero (behavior of both intrinsics is undefined for 0).
 * Three implementations: MSVC intrinsic, GCC builtin, portable De Bruijn fallback. */
FORCE_INLINE unsigned FSE_highbit32 (register U32 val)
{
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r;
    _BitScanReverse ( &r, val );
    return (unsigned) r;
#   elif defined(__GNUC__) && (GCC_VERSION >= 304)   /* GCC Intrinsic */
    return 31 - __builtin_clz (val);
#   else   /* Software version */
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    unsigned r;
    /* smear the top set bit into every lower position ... */
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    /* ... then identify it via a De Bruijn multiply + table lookup */
    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
    return r;
#   endif
}
|
362
|
+
|
363
|
+
|
364
|
+
/****************************************************************
|
365
|
+
* Templates
|
366
|
+
****************************************************************/
|
367
|
+
/*
|
368
|
+
designed to be included
|
369
|
+
for type-specific functions (template emulation in C)
|
370
|
+
Objective is to write these functions only once, for improved maintenance
|
371
|
+
*/
|
372
|
+
|
373
|
+
/* safety checks */
|
374
|
+
#ifndef FSE_FUNCTION_EXTENSION
|
375
|
+
# error "FSE_FUNCTION_EXTENSION must be defined"
|
376
|
+
#endif
|
377
|
+
#ifndef FSE_FUNCTION_TYPE
|
378
|
+
# error "FSE_FUNCTION_TYPE must be defined"
|
379
|
+
#endif
|
380
|
+
|
381
|
+
/* Function names */
|
382
|
+
#define FSE_CAT(X,Y) X##Y
|
383
|
+
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
|
384
|
+
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
|
385
|
+
|
386
|
+
|
387
|
+
|
388
|
+
/* Spread step used when laying out symbols: (5/8)*tableSize + 3 (coprime with power-of-2 sizes). */
static U32 FSE_tableStep(U32 tableSize)
{
    U32 step = tableSize >> 1;   /* tableSize/2 */
    step += tableSize >> 3;      /* + tableSize/8 */
    return step + 3;
}
|
389
|
+
|
390
|
+
#define FSE_DECODE_TYPE FSE_decode_t
|
391
|
+
|
392
|
+
|
393
|
+
typedef struct {
|
394
|
+
U16 tableLog;
|
395
|
+
U16 fastMode;
|
396
|
+
} FSE_DTableHeader; /* sizeof U32 */
|
397
|
+
|
398
|
+
/* FSE_buildDTable() :
 * Builds a full FSE decoding table from a normalized count distribution.
 * dt                : destination, FSE_DTABLE_SIZE_U32(tableLog) unsigned cells
 * normalizedCounter : normalized frequencies indexed 0..maxSymbolValue;
 *                     the special value -1 marks a "low probability" symbol,
 *                     which is assigned a single cell at the top of the table
 * Returns 0 on success, or a negated FSE error code (see FSE_isError). */
static size_t FSE_buildDTable
(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1;   /* because dt is unsigned, 32-bits aligned on 32-bits */
    const U32 tableSize = 1 << tableLog;
    const U32 tableMask = tableSize-1;
    const U32 step = FSE_tableStep(tableSize);
    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];   /* per-symbol running state counter */
    U32 position = 0;
    U32 highThreshold = tableSize-1;          /* low-prob symbols fill downward from the top */
    const S16 largeLimit= (S16)(1 << (tableLog-1));
    U32 noLarge = 1;                          /* stays 1 iff no symbol has prob >= 1/2 (enables fastMode) */
    U32 s;

    /* Sanity Checks */
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;
    if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;

    /* Init, lay down lowprob symbols */
    DTableH[0].tableLog = (U16)tableLog;
    for (s=0; s<=maxSymbolValue; s++)
    {
        if (normalizedCounter[s]==-1)
        {
            /* "less than 1" probability: one dedicated cell at the table top */
            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
            symbolNext[s] = 1;
        }
        else
        {
            if (normalizedCounter[s] >= largeLimit) noLarge=0;
            symbolNext[s] = normalizedCounter[s];
        }
    }

    /* Spread symbols across the table using the coprime step (same order as the encoder) */
    for (s=0; s<=maxSymbolValue; s++)
    {
        int i;
        for (i=0; i<normalizedCounter[s]; i++)
        {
            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
            position = (position + step) & tableMask;
            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }
    }

    if (position!=0) return (size_t)-FSE_ERROR_GENERIC;   /* position must reach all cells once, otherwise normalizedCounter is incorrect */

    /* Build Decoding table : derive nbBits and newState for each cell */
    {
        U32 i;
        for (i=0; i<tableSize; i++)
        {
            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
            U16 nextState = symbolNext[symbol]++;
            tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );
            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
        }
    }

    DTableH->fastMode = (U16)noLarge;
    return 0;
}
|
463
|
+
|
464
|
+
|
465
|
+
/******************************************
|
466
|
+
* FSE byte symbol
|
467
|
+
******************************************/
|
468
|
+
#ifndef FSE_COMMONDEFS_ONLY
|
469
|
+
|
470
|
+
/* Error codes are returned as small negative size_t values:
 * any code above (size_t)(-FSE_ERROR_maxCode) is an error. */
static unsigned FSE_isError(size_t code)
{
    return code > (size_t)(-FSE_ERROR_maxCode);
}
|
471
|
+
|
472
|
+
/* Absolute value of a short. Callers only pass normalized counts, so SHRT_MIN never occurs. */
static short FSE_abs(short a)
{
    if (a < 0) return (short)(-a);
    return a;
}
|
476
|
+
|
477
|
+
|
478
|
+
/****************************************************************
|
479
|
+
* Header bitstream management
|
480
|
+
****************************************************************/
|
481
|
+
/* FSE_readNCount() :
 * Decodes a compressed normalized-count header from headerBuffer.
 * normalizedCounter : output array of counts (may contain -1 for low-prob symbols)
 * maxSVPtr          : in: capacity of normalizedCounter; out: highest symbol actually read
 * tableLogPtr       : out: decoded tableLog
 * Returns the number of bytes consumed from headerBuffer, or a negated FSE error code. */
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                 const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;      /* probability mass still to distribute (+1 accuracy bias) */
    int threshold;      /* current decoding threshold; shrinks as remaining shrinks */
    U32 bitStream;
    int bitCount;       /* bits of bitStream already consumed */
    unsigned charnum = 0;
    int previous0 = 0;  /* set after a zero count: next field is a zero-run length */

    if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;
    bitStream = FSE_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    while ((remaining>1) && (charnum<=*maxSVPtr))
    {
        if (previous0)
        {
            /* Decode a run of zero counts: 0xFFFF extends by 24, 2-bit "3" extends by 3 */
            unsigned n0 = charnum;
            while ((bitStream & 0xFFFF) == 0xFFFF)
            {
                n0+=24;
                if (ip < iend-5)
                {
                    ip+=2;
                    bitStream = FSE_readLE32(ip) >> bitCount;
                }
                else
                {
                    /* near end of buffer: consume from the current word instead of advancing */
                    bitStream >>= 16;
                    bitCount+=16;
                }
            }
            while ((bitStream & 3) == 3)
            {
                n0+=3;
                bitStream>>=2;
                bitCount+=2;
            }
            n0 += bitStream & 3;
            bitCount += 2;
            if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;
            while (charnum < n0) normalizedCounter[charnum++] = 0;
            /* refill bitStream when it is safe to read 4 bytes */
            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
            {
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = FSE_readLE32(ip) >> bitCount;
            }
            else
                bitStream >>= 2;
        }
        {
            /* Decode one count using a variable-length code:
             * values below `max` fit in (nbBits-1) bits, others need nbBits */
            const short max = (short)((2*threshold-1)-remaining);
            short count;

            if ((bitStream & (threshold-1)) < (U32)max)
            {
                count = (short)(bitStream & (threshold-1));
                bitCount += nbBits-1;
            }
            else
            {
                count = (short)(bitStream & (2*threshold-1));
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy : stored value is count+1; -1 encodes low-prob */
            remaining -= FSE_abs(count);
            normalizedCounter[charnum++] = count;
            previous0 = !count;
            while (remaining < threshold)
            {
                /* remaining mass shrank: fewer bits needed for the next counts */
                nbBits--;
                threshold >>= 1;
            }

            {
                /* advance ip / refill bitStream, clamping at (iend - 4) near the end */
                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
                {
                    ip += bitCount>>3;
                    bitCount &= 7;
                }
                else
                {
                    bitCount -= (int)(8 * (iend - 4 - ip));
                    ip = iend - 4;
                }
                bitStream = FSE_readLE32(ip) >> (bitCount & 31);
            }
        }
    }
    if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;   /* distribution must sum exactly */
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;   /* round consumed bits up to whole bytes */
    if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;
    return ip-istart;
}
|
592
|
+
|
593
|
+
|
594
|
+
/*********************************************************
|
595
|
+
* Decompression (Byte symbols)
|
596
|
+
*********************************************************/
|
597
|
+
/* Builds a degenerate decoding table for an RLE block: a single symbol,
 * decoded with 0 bits per occurrence. Always succeeds (returns 0). */
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
    void* const base = dt;
    FSE_DTableHeader* const header = (FSE_DTableHeader*)base;
    FSE_decode_t* const singleCell = (FSE_decode_t*)(base) + 1;   /* because dt is unsigned */

    header->tableLog = 0;    /* state never consumes bits */
    header->fastMode = 0;

    singleCell->newState = 0;
    singleCell->symbol = symbolValue;
    singleCell->nbBits = 0;

    return 0;
}
|
612
|
+
|
613
|
+
|
614
|
+
/* Builds a decoding table for uncompressed symbols: every cell s decodes to
 * symbol s using exactly nbBits, with no state transition.
 * Returns 0 on success, or a negated FSE error code when nbBits < 1. */
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
    void* const base = dt;
    FSE_DTableHeader* const header = (FSE_DTableHeader*)base;
    FSE_decode_t* const cells = (FSE_decode_t*)(base) + 1;   /* because dt is unsigned */
    const unsigned tableSize = 1 << nbBits;
    const unsigned lastSymbol = tableSize - 1;   /* == tableMask == maxSymbolValue */
    unsigned sym;

    /* Sanity checks */
    if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC;   /* min size */

    /* Build Decoding Table */
    header->tableLog = (U16)nbBits;
    header->fastMode = 1;
    for (sym = 0; sym <= lastSymbol; sym++)
    {
        cells[sym].newState = 0;
        cells[sym].symbol = (BYTE)sym;
        cells[sym].nbBits = (BYTE)nbBits;
    }

    return 0;
}
|
639
|
+
|
640
|
+
|
641
|
+
/* FSE_initDStream
|
642
|
+
* Initialize a FSE_DStream_t.
|
643
|
+
* srcBuffer must point at the beginning of an FSE block.
|
644
|
+
* The function result is the size of the FSE_block (== srcSize).
|
645
|
+
* If srcSize is too small, the function will return an errorCode;
|
646
|
+
*/
|
647
|
+
static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;

    if (srcSize >= sizeof(size_t))
    {
        /* Common path: load the last size_t-sized word; decoding walks backward from the end */
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
        bitD->bitContainer = FSE_readLEST(bitD->ptr);
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
        /* the highest set bit of the last byte is the end-of-stream marker, not data */
        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
    }
    else
    {
        /* Short input (< sizeof(size_t) bytes): assemble the container byte by byte.
         * The switch intentionally falls through, accumulating from byte [srcSize-1] down. */
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        bitD->ptr = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);   /* fallthrough */
            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);   /* fallthrough */
            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);   /* fallthrough */
            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;   /* fallthrough */
            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;   /* fallthrough */
            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;    /* fallthrough */
            default:;
        }
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
        /* account for the bytes the container does not actually hold */
        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
    }

    return srcSize;
}
|
685
|
+
|
686
|
+
|
687
|
+
/*!FSE_lookBits
|
688
|
+
* Provides next n bits from the bitContainer.
|
689
|
+
* bitContainer is not modified (bits are still present for next read/look)
|
690
|
+
* On 32-bits, maxNbBits==25
|
691
|
+
* On 64-bits, maxNbBits==57
|
692
|
+
* return : value extracted.
|
693
|
+
*/
|
694
|
+
/* Peeks the next nbBits from the container without consuming them.
 * Safe for nbBits == 0 thanks to the split ">> 1 >> (mask-n)" shift.
 * Max look: 25 bits on 32-bit targets, 57 on 64-bit. */
static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)
{
    const U32 regMask = sizeof(bitD->bitContainer)*8 - 1;
    size_t const aligned = bitD->bitContainer << (bitD->bitsConsumed & regMask);
    return (aligned >> 1) >> ((regMask - nbBits) & regMask);
}
|
699
|
+
|
700
|
+
/* Faster peek variant; single shift, so it is only valid when nbBits >= 1. */
static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
{
    const U32 regMask = sizeof(bitD->bitContainer)*8 - 1;
    size_t const aligned = bitD->bitContainer << (bitD->bitsConsumed & regMask);
    return aligned >> (((regMask+1) - nbBits) & regMask);
}
|
705
|
+
|
706
|
+
/* Marks nbBits as consumed; the container is refilled later by FSE_reloadDStream(). */
static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}
|
710
|
+
|
711
|
+
|
712
|
+
/*!FSE_readBits
|
713
|
+
* Read next n bits from the bitContainer.
|
714
|
+
* On 32-bits, don't read more than maxNbBits==25
|
715
|
+
* On 64-bits, don't read more than maxNbBits==57
|
716
|
+
* Use the fast variant *only* if n >= 1.
|
717
|
+
* return : value extracted.
|
718
|
+
*/
|
719
|
+
/* Reads and consumes the next nbBits (max 25 on 32-bit, 57 on 64-bit targets). */
static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)
{
    size_t const extracted = FSE_lookBits(bitD, nbBits);
    FSE_skipBits(bitD, nbBits);
    return extracted;
}
|
725
|
+
|
726
|
+
/* Unsafe fast path of FSE_readBits() : only valid if nbBits >= 1. */
static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
{
    size_t const result = FSE_lookBitsFast(bitD, nbBits);
    FSE_skipBits(bitD, nbBits);
    return result;
}
|
732
|
+
|
733
|
+
/* Refills the bit container by moving the read pointer backward
 * (the stream is read back-to-front) and re-reading a full container.
 * Returns one of the FSE_DStream_* status codes :
 * unfinished / endOfBuffer / completed / tooFar. */
static unsigned FSE_reloadDStream(FSE_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
        return FSE_DStream_tooFar;

    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
    {
        /* enough room to rewind by whole bytes and re-read a full container */
        bitD->ptr -= bitD->bitsConsumed >> 3;
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = FSE_readLEST(bitD->ptr);
        return FSE_DStream_unfinished;
    }
    if (bitD->ptr == bitD->start)
    {
        /* cannot rewind further : completed only when every bit is consumed */
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;
        return FSE_DStream_completed;
    }
    {
        /* partial rewind : clamp to buffer start if needed */
        U32 nbBytes = bitD->bitsConsumed >> 3;
        U32 result = FSE_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start)
        {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = FSE_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = FSE_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
        return result;
    }
}
|
764
|
+
|
765
|
+
|
766
|
+
/* Initializes an FSE decoding state : reads tableLog bits from the stream
 * as the initial state value, and points the state at the decode table
 * body (dt+1 : cell 0 of the DTable is its header). */
static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;   /* void* hop avoids a direct incompatible-pointer cast */
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);
    FSE_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}
|
774
|
+
|
775
|
+
/* Decodes one symbol and advances the state :
 * newState = baseline from table + next nbBits from the stream. */
static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
{
    const FSE_decode_t entry = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const size_t lowBits = FSE_readBits(bitD, entry.nbBits);

    DStatePtr->state = entry.newState + lowBits;
    return entry.symbol;
}
|
785
|
+
|
786
|
+
/* Same as FSE_decodeSymbol(), using the unsafe bit reader
 * (valid only while every table entry has nbBits >= 1, i.e. fastMode). */
static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
{
    const FSE_decode_t entry = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const size_t lowBits = FSE_readBitsFast(bitD, entry.nbBits);

    DStatePtr->state = entry.newState + lowBits;
    return entry.symbol;
}
|
796
|
+
|
797
|
+
/* FSE_endOfDStream
   Tells if bitD has reached end of bitStream or not :
   true only when rewound to the buffer start AND the whole container
   has been consumed. */

static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)
{
    return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));
}
|
804
|
+
|
805
|
+
/* A state value of 0 marks a properly terminated FSE state. */
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}
|
809
|
+
|
810
|
+
|
811
|
+
/* Core FSE decoding loop : two interleaved states share one bit stream.
 * `fast` is a compile-time constant (FORCE_INLINE + const parameter), so
 * each caller gets a specialized body with the symbol-decoder branch
 * resolved statically. Returns the number of decoded bytes, or an error. */
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;   /* main loop writes 4 bytes per iteration */

    FSE_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;
    size_t errorCode;

    /* Init */
    errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
    if (FSE_isError(errorCode)) return errorCode;

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)
    {
        op[0] = FSE_GETSYMBOL(&state1);

        /* reload between symbols only when the container may not hold
         * enough bits for the next reads; the sizeof test folds away */
        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            FSE_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            FSE_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail : one symbol at a time, alternating states until both are done */
    /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */
    while (1)
    {
        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
            break;

        *op++ = FSE_GETSYMBOL(&state1);

        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
            break;

        *op++ = FSE_GETSYMBOL(&state2);
    }

    /* end ? */
    if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
        return op-ostart;

    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */

    return (size_t)-FSE_ERROR_corruptionDetected;
}
|
879
|
+
|
880
|
+
|
881
|
+
static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
|
882
|
+
const void* cSrc, size_t cSrcSize,
|
883
|
+
const FSE_DTable* dt)
|
884
|
+
{
|
885
|
+
FSE_DTableHeader DTableH;
|
886
|
+
memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */
|
887
|
+
|
888
|
+
/* select fast mode (static) */
|
889
|
+
if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
|
890
|
+
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
|
891
|
+
}
|
892
|
+
|
893
|
+
|
894
|
+
/* Full FSE decompression : reads the normalized count header, builds a
 * DTable, then decodes the payload. Returns decoded size or an error. */
static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    short counting[FSE_MAX_SYMBOL_VALUE+1];
    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    size_t errorCode;

    if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */

    /* normal FSE decoding mode */
    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
    if (FSE_isError(errorCode)) return errorCode;
    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */
    ip += errorCode;        /* errorCode here is the header size */
    cSrcSize -= errorCode;

    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
    if (FSE_isError(errorCode)) return errorCode;

    /* always return, even if it is an error code */
    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
|
919
|
+
|
920
|
+
|
921
|
+
|
922
|
+
/* *******************************************************
*  Huff0 : Huffman block compression
*********************************************************/
#define HUF_MAX_SYMBOL_VALUE 255
#define HUF_DEFAULT_TABLELOG  12       /* used by default, when not specified */
#define HUF_MAX_TABLELOG  12           /* max possible tableLog; for allocation purpose; can be modified */
#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
#  error "HUF_MAX_TABLELOG is too large !"
#endif

/* one Huffman code : bit pattern + its length */
typedef struct HUF_CElt_s {
  U16  val;
  BYTE nbBits;
} HUF_CElt ;

/* tree-building node (compression side) */
typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;
|
944
|
+
|
945
|
+
|
946
|
+
/* *******************************************************
*  Huff0 : Huffman block decompression
*********************************************************/
/* one decode-table cell : symbol + number of bits it consumes.
 * Must stay exactly sizeof(U16) (static-asserted in HUF_readDTable). */
typedef struct {
    BYTE byte;
    BYTE nbBits;
} HUF_DElt;
|
953
|
+
|
954
|
+
/* Reads a Huffman table description and fills DTable.
 * DTable[0] holds the max allowed tableLog on entry, the actual one on exit;
 * cells DTable+1.. hold HUF_DElt entries. Returns the number of input bytes
 * consumed (header size), or an error code. */
static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)
{
    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];  /* large enough for values from 0 to 16 */
    U32 weightTotal;
    U32 maxBits;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;          /* size of the weight description in src */
    size_t oSize;          /* number of decoded weights */
    U32 n;
    U32 nextRankStart;
    void* ptr = DTable+1;
    HUF_DElt* const dt = (HUF_DElt*)ptr;

    if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
    iSize = ip[0];

    FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16));    /* if compilation fails here, assertion is false */
    //memset(huffWeight, 0, sizeof(huffWeight));   /* should not be necessary, but some analyzer complain ... */
    if (iSize >= 128)  /* special header */
    {
        if (iSize >= (242))   /* RLE : all weights equal 1; count looked up by header byte */
        {
            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
            oSize = l[iSize-242];
            memset(huffWeight, 1, sizeof(huffWeight));
            iSize = 0;
        }
        else   /* Incompressible : weights stored raw, 4 bits each */
        {
            oSize = iSize - 127;
            iSize = ((oSize+1)/2);
            if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
            ip += 1;
            for (n=0; n<oSize; n+=2)
            {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
            }
        }
    }
    else  /* header compressed with FSE (normal case) */
    {
        if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
        oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize);   /* max 255 values decoded, last one is implied */
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankVal, 0, sizeof(rankVal));
    weightTotal = 0;
    for (n=0; n<oSize; n++)
    {
        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;
        rankVal[huffWeight[n]]++;
        weightTotal += (1 << huffWeight[n]) >> 1;   /* weight w contributes 2^(w-1); w==0 contributes 0 */
    }
    if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;

    /* get last non-null symbol weight (implied, total must be 2^n) */
    maxBits = FSE_highbit32(weightTotal) + 1;
    if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge;   /* DTable is too small */
    DTable[0] = (U16)maxBits;
    {
        U32 total = 1 << maxBits;
        U32 rest = total - weightTotal;
        U32 verif = 1 << FSE_highbit32(rest);
        U32 lastWeight = FSE_highbit32(rest) + 1;
        if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected;    /* last value must be a clean power of 2 */
        huffWeight[oSize] = (BYTE)lastWeight;
        rankVal[lastWeight]++;
    }

    /* check tree construction validity */
    if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected;   /* by construction : at least 2 elts of rank 1, must be even */

    /* Prepare ranks : turn per-weight counts into start offsets in dt */
    nextRankStart = 0;
    for (n=1; n<=maxBits; n++)
    {
        U32 current = nextRankStart;
        nextRankStart += (rankVal[n] << (n-1));
        rankVal[n] = current;
    }

    /* fill DTable : each symbol of weight w occupies 2^(w-1) consecutive cells */
    for (n=0; n<=oSize; n++)
    {
        const U32 w = huffWeight[n];
        const U32 length = (1 << w) >> 1;
        U32 i;
        HUF_DElt D;
        D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);
        for (i = rankVal[w]; i < rankVal[w] + length; i++)
            dt[i] = D;
        rankVal[w] += length;
    }

    return iSize+1;
}
|
1054
|
+
|
1055
|
+
|
1056
|
+
/* Peeks dtLog bits to index the decode table, then consumes only the
 * bits the matched entry actually uses. */
static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)
{
    const size_t entryIndex = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
    const BYTE decoded = dt[entryIndex].byte;
    FSE_skipBits(Dstream, dt[entryIndex].nbBits);
    return decoded;
}
|
1063
|
+
|
1064
|
+
/* Decodes a Huff0 block split into 4 independent bit streams.
 * A 6-byte jump table (three LE16 lengths) prefixes the payload; the
 * fourth length is implied. Main loop emits 16 symbols per iteration
 * (4 per stream), then a scalar tail drains stream 1.
 * NOTE(review): streams 2-4 are assumed to finish in the main loop;
 * only stream 1 is drained in the tail — confirm against the encoder. */
static size_t HUF_decompress_usingDTable(   /* -3% slower when non static */
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const U16* DTable)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-15;   /* main loop writes 16 bytes per iteration */

    const void* ptr = DTable;
    const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;   /* DTable[0] is the tableLog header */
    const U32 dtLog = DTable[0];
    size_t errorCode;
    U32 reloadStatus;

    /* Init */

    const U16* jumpTable = (const U16*)cSrc;
    const size_t length1 = FSE_readLE16(jumpTable);
    const size_t length2 = FSE_readLE16(jumpTable+1);
    const size_t length3 = FSE_readLE16(jumpTable+2);
    const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   // check coherency !!
    const char* const start1 = (const char*)(cSrc) + 6;
    const char* const start2 = start1 + length1;
    const char* const start3 = start2 + length2;
    const char* const start4 = start3 + length3;
    FSE_DStream_t bitD1, bitD2, bitD3, bitD4;

    if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;

    errorCode = FSE_initDStream(&bitD1, start1, length1);
    if (FSE_isError(errorCode)) return errorCode;
    errorCode = FSE_initDStream(&bitD2, start2, length2);
    if (FSE_isError(errorCode)) return errorCode;
    errorCode = FSE_initDStream(&bitD3, start3, length3);
    if (FSE_isError(errorCode)) return errorCode;
    errorCode = FSE_initDStream(&bitD4, start4, length4);
    if (FSE_isError(errorCode)) return errorCode;

    reloadStatus=FSE_reloadDStream(&bitD2);

    /* 16 symbols per loop */
    for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */
        op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
    {
        /* _0 : decode only; _1 : reload on 32-bit when tableLog could overflow
         * the container; _2 : always reload on 32-bit */
#define HUF_DECODE_SYMBOL_0(n, Dstream) \
        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);

#define HUF_DECODE_SYMBOL_1(n, Dstream) \
        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
        if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)

#define HUF_DECODE_SYMBOL_2(n, Dstream) \
        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
        if (FSE_32bits()) FSE_reloadDStream(&Dstream)

        HUF_DECODE_SYMBOL_1( 0, bitD1);
        HUF_DECODE_SYMBOL_1( 1, bitD2);
        HUF_DECODE_SYMBOL_1( 2, bitD3);
        HUF_DECODE_SYMBOL_1( 3, bitD4);
        HUF_DECODE_SYMBOL_2( 4, bitD1);
        HUF_DECODE_SYMBOL_2( 5, bitD2);
        HUF_DECODE_SYMBOL_2( 6, bitD3);
        HUF_DECODE_SYMBOL_2( 7, bitD4);
        HUF_DECODE_SYMBOL_1( 8, bitD1);
        HUF_DECODE_SYMBOL_1( 9, bitD2);
        HUF_DECODE_SYMBOL_1(10, bitD3);
        HUF_DECODE_SYMBOL_1(11, bitD4);
        HUF_DECODE_SYMBOL_0(12, bitD1);
        HUF_DECODE_SYMBOL_0(13, bitD2);
        HUF_DECODE_SYMBOL_0(14, bitD3);
        HUF_DECODE_SYMBOL_0(15, bitD4);
    }

    if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */
        return (size_t)-FSE_ERROR_corruptionDetected;

    /* tail */
    {
        // bitTail = bitD1;   // *much* slower : -20% !??!
        FSE_DStream_t bitTail;
        bitTail.ptr = bitD1.ptr;
        bitTail.bitsConsumed = bitD1.bitsConsumed;
        bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer
        bitTail.start = start1;
        for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
        {
            HUF_DECODE_SYMBOL_0(0, bitTail);
        }

        if (FSE_endOfDStream(&bitTail))
            return op-ostart;
    }

    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */

    return (size_t)-FSE_ERROR_corruptionDetected;
}
|
1163
|
+
|
1164
|
+
|
1165
|
+
/* Huff0 block decompression : read the table description, then decode
 * the remaining payload with it. */
static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;
    size_t hSize;

    /* decode the Huffman table header */
    hSize = HUF_readDTable(DTable, cSrc, cSrcSize);
    if (FSE_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;

    /* decode the payload that follows it */
    ip += hSize;
    cSrcSize -= hSize;
    return HUF_decompress_usingDTable(dst, maxDstSize, ip, cSrcSize, DTable);
}
|
1179
|
+
|
1180
|
+
|
1181
|
+
#endif /* FSE_COMMONDEFS_ONLY */
|
1182
|
+
|
1183
|
+
/*
|
1184
|
+
zstd - standard compression library
|
1185
|
+
Copyright (C) 2014-2015, Yann Collet.
|
1186
|
+
|
1187
|
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
1188
|
+
|
1189
|
+
Redistribution and use in source and binary forms, with or without
|
1190
|
+
modification, are permitted provided that the following conditions are
|
1191
|
+
met:
|
1192
|
+
* Redistributions of source code must retain the above copyright
|
1193
|
+
notice, this list of conditions and the following disclaimer.
|
1194
|
+
* Redistributions in binary form must reproduce the above
|
1195
|
+
copyright notice, this list of conditions and the following disclaimer
|
1196
|
+
in the documentation and/or other materials provided with the
|
1197
|
+
distribution.
|
1198
|
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1199
|
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1200
|
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1201
|
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1202
|
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1203
|
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1204
|
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1205
|
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1206
|
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1207
|
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1208
|
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1209
|
+
|
1210
|
+
You can contact the author at :
|
1211
|
+
- zstd source repository : https://github.com/Cyan4973/zstd
|
1212
|
+
- ztsd public forum : https://groups.google.com/forum/#!forum/lz4c
|
1213
|
+
*/
|
1214
|
+
|
1215
|
+
/****************************************************************
*  Tuning parameters
*****************************************************************/
/* MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect */
#define ZSTD_MEMORY_USAGE 17


/**************************************
   CPU Feature Detection
**************************************/
/*
 * Automated efficient unaligned memory access detection
 * Based on known hardware architectures
 * This list will be updated thanks to feedbacks
 */
#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
    || defined(__ARM_FEATURE_UNALIGNED) \
    || defined(__i386__) || defined(__x86_64__) \
    || defined(_M_IX86) || defined(_M_X64) \
    || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
    || (defined(_M_ARM) && (_M_ARM >= 7))
#  define ZSTD_UNALIGNED_ACCESS 1
#else
#  define ZSTD_UNALIGNED_ACCESS 0
#endif
|
1243
|
+
|
1244
|
+
|
1245
|
+
/********************************************************
|
1246
|
+
* Includes
|
1247
|
+
*********************************************************/
|
1248
|
+
#include <stdlib.h> /* calloc */
|
1249
|
+
#include <string.h> /* memcpy, memmove */
|
1250
|
+
#include <stdio.h> /* debug : printf */
|
1251
|
+
|
1252
|
+
|
1253
|
+
/********************************************************
|
1254
|
+
* Compiler specifics
|
1255
|
+
*********************************************************/
|
1256
|
+
#ifdef __AVX2__
|
1257
|
+
# include <immintrin.h> /* AVX2 intrinsics */
|
1258
|
+
#endif
|
1259
|
+
|
1260
|
+
#ifdef _MSC_VER /* Visual Studio */
|
1261
|
+
# include <intrin.h> /* For Visual 2005 */
|
1262
|
+
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
1263
|
+
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
|
1264
|
+
#endif
|
1265
|
+
|
1266
|
+
|
1267
|
+
#ifndef MEM_ACCESS_MODULE
#define MEM_ACCESS_MODULE
/********************************************************
*  Basic Types
*********************************************************/
/* fixed-width aliases : use <stdint.h> when C99 is available,
 * otherwise fall back to plain integer types of matching width */
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
# include <stdint.h>
typedef  uint8_t BYTE;
typedef uint16_t U16;
typedef  int16_t S16;
typedef uint32_t U32;
typedef  int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char       BYTE;
typedef unsigned short      U16;
typedef   signed short      S16;
typedef unsigned int        U32;
typedef   signed int        S32;
typedef unsigned long long  U64;
#endif

#endif /* MEM_ACCESS_MODULE */
|
1290
|
+
|
1291
|
+
|
1292
|
+
/********************************************************
*  Constants
*********************************************************/
static const U32 ZSTD_magicNumber = 0xFD2FB51E;   /* 3rd version : seqNb header */

/* hash table sizing, derived from the memory-usage knob */
#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
#define HASH_TABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASH_TABLESIZE - 1)

#define KNUTH 2654435761   /* Fibonacci-hashing multiplier */

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16

/* size suffixes : written postfix, e.g. (128 KB) */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BLOCKSIZE (128 KB)   /* define, for static allocation */

#define WORKPLACESIZE (BLOCKSIZE*3)
#define MINMATCH 4
/* field widths and max values of the sequence codes */
#define MLbits   7
#define LLbits   6
#define Offbits  5
#define MaxML  ((1<<MLbits )-1)
#define MaxLL  ((1<<LLbits )-1)
#define MaxOff ((1<<Offbits)-1)
/* FSE table logs for each sequence component */
#define LitFSELog  11
#define MLFSELog   10
#define LLFSELog   10
#define OffFSELog   9
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MaxSeq MAX(MaxLL, MaxML)

#define LITERAL_NOENTROPY 63
#define COMMAND_NOENTROPY 7   /* to remove */

static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize = 4;
|
1334
|
+
|
1335
|
+
|
1336
|
+
/********************************************************
*  Memory operations
*********************************************************/
/* runtime pointer-width check; folds to a constant at compile time */
static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }

/* runtime endianness probe via a union; also a compile-time constant
 * for the optimizer on fixed-endian targets */
static unsigned ZSTD_isLittleEndian(void)
{
    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    return one.c[0];
}
|
1346
|
+
|
1347
|
+
/* unaligned-safe reads : memcpy into a local compiles to a plain load */
static U16 ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }

static U32 ZSTD_read32(const void* p) { U32 r; memcpy(&r, p, sizeof(r)); return r; }

static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }

static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }

#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/* Copies `length` bytes in 8-byte chunks.
 * WARNING : may write up to 7 bytes past dst+length (the final COPY8
 * always moves 8 bytes) — caller must guarantee that slack exists. */
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;
    while (op < oend) COPY8(op, ip);
}
|
1364
|
+
|
1365
|
+
static U16 ZSTD_readLE16(const void* memPtr)
|
1366
|
+
{
|
1367
|
+
if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);
|
1368
|
+
else
|
1369
|
+
{
|
1370
|
+
const BYTE* p = (const BYTE*)memPtr;
|
1371
|
+
return (U16)((U16)p[0] + ((U16)p[1]<<8));
|
1372
|
+
}
|
1373
|
+
}
|
1374
|
+
|
1375
|
+
|
1376
|
+
static U32 ZSTD_readLE32(const void* memPtr)
|
1377
|
+
{
|
1378
|
+
if (ZSTD_isLittleEndian())
|
1379
|
+
return ZSTD_read32(memPtr);
|
1380
|
+
else
|
1381
|
+
{
|
1382
|
+
const BYTE* p = (const BYTE*)memPtr;
|
1383
|
+
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
|
1384
|
+
}
|
1385
|
+
}
|
1386
|
+
|
1387
|
+
static U32 ZSTD_readBE32(const void* memPtr)
|
1388
|
+
{
|
1389
|
+
const BYTE* p = (const BYTE*)memPtr;
|
1390
|
+
return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0));
|
1391
|
+
}
|
1392
|
+
|
1393
|
+
|
1394
|
+
/**************************************
*  Local structures
***************************************/
typedef struct ZSTD_Cctx_s ZSTD_Cctx;

typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;

/* decoded block header */
typedef struct
{
    blockType_t blockType;
    U32 origSize;    /* original size, filled only for RLE blocks */
} blockProperties_t;

/* sequence storage : for each array, *Start is the base and the
 * unsuffixed pointer is the current write position */
typedef struct {
    void* buffer;
    U32*  offsetStart;
    U32*  offset;
    BYTE* offCodeStart;
    BYTE* offCode;
    BYTE* litStart;
    BYTE* lit;
    BYTE* litLengthStart;
    BYTE* litLength;
    BYTE* matchLengthStart;
    BYTE* matchLength;
    BYTE* dumpsStart;
    BYTE* dumps;     /* overflow area for lengths too large for their field */
} seqStore_t;


/* compression context */
typedef struct ZSTD_Cctx_s
{
    const BYTE* base;
    U32 current;
    U32 nextUpdate;
    seqStore_t seqStore;
#ifdef __AVX2__
    __m256i hashTable[HASH_TABLESIZE>>3];
#else
    U32 hashTable[HASH_TABLESIZE];
#endif
    BYTE buffer[WORKPLACESIZE];
} cctxi_t;
|
1437
|
+
|
1438
|
+
|
1439
|
+
|
1440
|
+
|
1441
|
+
/**************************************
*  Error Management
**************************************/
/* published entry point : tells whether a size_t result is an error code */
unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }


/**************************************
*  Tool functions
**************************************/
#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
#define ZSTD_VERSION_MINOR    1    /* for new (non-breaking) interface capabilities */
#define ZSTD_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */
#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
|
1455
|
+
|
1456
|
+
/**************************************************************
|
1457
|
+
* Decompression code
|
1458
|
+
**************************************************************/
|
1459
|
+
|
1460
|
+
/* Decodes a 3-byte block header : top 2 bits of byte 0 carry the block
 * type, the remaining 22 bits the content size (big-endian-ish packing).
 * Fills bpPtr and returns the size of the block content that follows
 * (0 for an end block, 1 for the RLE byte), or an error code. */
size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
    const BYTE* const ip = (const BYTE*)src;
    blockType_t blockType;
    U32 contentSize;

    if (srcSize < 3) return ERROR(srcSize_wrong);

    contentSize = ip[2] + (ip[1]<<8) + ((ip[0] & 7)<<16);
    blockType = (blockType_t)(ip[0] >> 6);

    bpPtr->blockType = blockType;
    bpPtr->origSize = (blockType == bt_rle) ? contentSize : 0;

    switch (blockType)
    {
    case bt_end: return 0;
    case bt_rle: return 1;
    default:     return contentSize;
    }
}
|
1478
|
+
|
1479
|
+
|
1480
|
+
/* Raw block : copy srcSize bytes verbatim. Returns the copied size,
 * or dstSize_tooSmall if the destination cannot hold it. */
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
    memcpy(dst, src, srcSize);
    return srcSize;
}
|
1486
|
+
|
1487
|
+
|
1488
|
+
static size_t ZSTD_decompressLiterals(void* ctx,
|
1489
|
+
void* dst, size_t maxDstSize,
|
1490
|
+
const void* src, size_t srcSize)
|
1491
|
+
{
|
1492
|
+
BYTE* op = (BYTE*)dst;
|
1493
|
+
BYTE* const oend = op + maxDstSize;
|
1494
|
+
const BYTE* ip = (const BYTE*)src;
|
1495
|
+
size_t errorCode;
|
1496
|
+
size_t litSize;
|
1497
|
+
|
1498
|
+
/* check : minimum 2, for litSize, +1, for content */
|
1499
|
+
if (srcSize <= 3) return ERROR(corruption_detected);
|
1500
|
+
|
1501
|
+
litSize = ip[1] + (ip[0]<<8);
|
1502
|
+
litSize += ((ip[-3] >> 3) & 7) << 16; // mmmmh....
|
1503
|
+
op = oend - litSize;
|
1504
|
+
|
1505
|
+
(void)ctx;
|
1506
|
+
if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);
|
1507
|
+
errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);
|
1508
|
+
if (FSE_isError(errorCode)) return ERROR(GENERIC);
|
1509
|
+
return litSize;
|
1510
|
+
}
|
1511
|
+
|
1512
|
+
|
1513
|
+
/* Decodes the literals sub-block of a compressed block.
 * Sets *litStart/*litSize to the decoded literals (raw literals point
 * into src; RLE/compressed literals are materialized at the end of dst).
 * Returns the number of input bytes consumed, or an error code. */
size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
                                void* dst, size_t maxDstSize,
                          const BYTE** litStart, size_t* litSize,
                          const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE* const)src;
    const BYTE* ip = istart;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + maxDstSize;
    blockProperties_t litbp;

    size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);
    if (ZSTDv01_isError(litcSize)) return litcSize;
    if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    ip += ZSTD_blockHeaderSize;

    switch(litbp.blockType)
    {
    case bt_raw:
        /* literals stored verbatim : point straight into the source */
        *litStart = ip;
        ip += litcSize;
        *litSize = litcSize;
        break;
    case bt_rle:
        {
            /* one repeated byte : expand at the end of dst */
            size_t rleSize = litbp.origSize;
            if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
            if (!srcSize) return ERROR(srcSize_wrong);
            memset(oend - rleSize, *ip, rleSize);
            *litStart = oend - rleSize;
            *litSize = rleSize;
            ip++;
            break;
        }
    case bt_compressed:
        {
            /* Huffman-compressed literals : decode flush-right in dst */
            size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);
            if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;
            *litStart = oend - decodedLitSize;
            *litSize = decodedLitSize;
            ip += litcSize;
            break;
        }
    case bt_end:
    default:
        return ERROR(GENERIC);
    }

    return ip-istart;
}
|
1563
|
+
|
1564
|
+
|
1565
|
+
/* Parse the sequences section header : number of sequences, the "dumps"
 * area (overflow bytes for long literal/match lengths), and the three FSE
 * decoding tables (literal lengths, match lengths, offsets).
 * Each table descriptor reuses the blockType enum : bt_rle = single symbol,
 * bt_raw = uniform distribution over xxBits, otherwise a serialized FSE
 * normalized count follows.
 * @return number of input bytes consumed, or an error code. */
size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
                         const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE* const)src;
    const BYTE* ip = istart;
    const BYTE* const iend = istart + srcSize;
    U32 LLtype, Offtype, MLtype;
    U32 LLlog, Offlog, MLlog;
    size_t dumpsLength;

    /* check */
    if (srcSize < 5) return ERROR(srcSize_wrong);

    /* SeqHead : 2-byte sequence count, then a descriptor byte holding the
     * three table types (2 bits each) and the dumps-length format flag */
    *nbSeq = ZSTD_readLE16(ip); ip+=2;
    LLtype  = *ip >> 6;
    Offtype = (*ip >> 4) & 3;
    MLtype  = (*ip >> 2) & 3;
    if (*ip & 2)
    {
        /* long format : 16-bit dumps length in the next two bytes */
        dumpsLength  = ip[2];
        dumpsLength += ip[1] << 8;
        ip += 3;
    }
    else
    {
        /* short format : 9-bit dumps length (low bit of descriptor is the high bit) */
        dumpsLength  = ip[1];
        dumpsLength += (ip[0] & 1) << 8;
        ip += 2;
    }
    *dumpsPtr = ip;
    ip += dumpsLength;
    *dumpsLengthPtr = dumpsLength;

    /* check */
    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */

    /* sequences */
    {
        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
        size_t headerSize;

        /* Build DTables */
        switch(LLtype)
        {
        case bt_rle :
            LLlog = 0;
            FSE_buildDTable_rle(DTableLL, *ip++); break;
        case bt_raw :
            LLlog = LLbits;
            FSE_buildDTable_raw(DTableLL, LLbits); break;
        default :
            {   U32 max = MaxLL;
                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
                if (FSE_isError(headerSize)) return ERROR(GENERIC);
                if (LLlog > LLFSELog) return ERROR(corruption_detected);
                ip += headerSize;
                FSE_buildDTable(DTableLL, norm, max, LLlog);
        }   }

        switch(Offtype)
        {
        case bt_rle :
            Offlog = 0;
            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
            FSE_buildDTable_rle(DTableOffb, *ip++); break;
        case bt_raw :
            Offlog = Offbits;
            FSE_buildDTable_raw(DTableOffb, Offbits); break;
        default :
            {   U32 max = MaxOff;
                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
                if (FSE_isError(headerSize)) return ERROR(GENERIC);
                if (Offlog > OffFSELog) return ERROR(corruption_detected);
                ip += headerSize;
                FSE_buildDTable(DTableOffb, norm, max, Offlog);
        }   }

        switch(MLtype)
        {
        case bt_rle :
            MLlog = 0;
            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
            FSE_buildDTable_rle(DTableML, *ip++); break;
        case bt_raw :
            MLlog = MLbits;
            FSE_buildDTable_raw(DTableML, MLbits); break;
        default :
            {   U32 max = MaxML;
                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
                if (FSE_isError(headerSize)) return ERROR(GENERIC);
                if (MLlog > MLFSELog) return ERROR(corruption_detected);
                ip += headerSize;
                FSE_buildDTable(DTableML, norm, max, MLlog);
    }   }   }

    return ip-istart;
}
|
1664
|
+
|
1665
|
+
|
1666
|
+
/* One decoded sequence : copy `litLength` literals, then copy `matchLength`
 * bytes from `offset` bytes back in the already-produced output. */
typedef struct {
    size_t litLength;    /* number of literals preceding the match */
    size_t offset;       /* backward distance of the match */
    size_t matchLength;  /* match length (MINMATCH already added by the decoder) */
} seq_t;
|
1671
|
+
|
1672
|
+
/* Mutable state carried across ZSTD_decodeSequence() calls while walking
 * the sequences bitstream of one block. */
typedef struct {
    FSE_DStream_t DStream;   /* shared input bitstream for the 3 FSE states */
    FSE_DState_t stateLL;    /* literal-length decoding state */
    FSE_DState_t stateOffb;  /* offset-code decoding state */
    FSE_DState_t stateML;    /* match-length decoding state */
    size_t prevOffset;       /* offset used for the repeat-offset code (offsetCode==0) */
    const BYTE* dumps;       /* cursor into the overflow-length byte area */
    const BYTE* dumpsEnd;    /* end of the dumps area */
} seqState_t;
|
1681
|
+
|
1682
|
+
|
1683
|
+
/* Decode the next sequence (litLength, offset, matchLength) from the FSE
 * bitstream into *seq, updating *seqState.
 * Lengths equal to the max symbol (MaxLL / MaxML) overflow into the "dumps"
 * area : one extra byte, or a 3-byte little-endian value when that byte is 255. */
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
    size_t litLength;
    size_t prevOffset;
    size_t offset;
    size_t matchLength;
    const BYTE* dumps = seqState->dumps;
    const BYTE* const de = seqState->dumpsEnd;

    /* Literal length */
    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
    /* repeat-offset bookkeeping : a zero litLength selects the saved prevOffset */
    prevOffset = litLength ? seq->offset : seqState->prevOffset;
    seqState->prevOffset = seq->offset;
    if (litLength == MaxLL)
    {
        /* overflow encoding : extend from the dumps area */
        U32 add = dumps<de ? *dumps++ : 0;
        if (add < 255) litLength += add;
        else
        {
            if (dumps<=(de-3))
            {
                litLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
                dumps += 3;
            }
        }
    }

    /* Offset */
    {
        U32 offsetCode, nbBits;
        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));
        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));   /* 32-bit : refill before reading extra bits */
        nbBits = offsetCode - 1;
        if (offsetCode==0) nbBits = 0;   /* cmove */
        /* mask keeps the shift within the width of size_t (avoids UB when nbBits is large) */
        offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);
        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
        if (offsetCode==0) offset = prevOffset;   /* repeat-offset code */
    }

    /* MatchLength */
    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
    if (matchLength == MaxML)
    {
        /* overflow encoding, same scheme as literal length */
        U32 add = dumps<de ? *dumps++ : 0;
        if (add < 255) matchLength += add;
        else
        {
            if (dumps<=(de-3))
            {
                matchLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
                dumps += 3;
            }
        }
    }
    matchLength += MINMATCH;   /* stored value excludes the minimum match length */

    /* save result */
    seq->litLength = litLength;
    seq->offset = offset;
    seq->matchLength = matchLength;
    seqState->dumps = dumps;
}
|
1745
|
+
|
1746
|
+
|
1747
|
+
/* Execute one sequence at `op` : copy its literals from *litPtr, then copy
 * the match from `sequence.offset` bytes back.  Uses 8-byte wildcopy where
 * a small overrun is provably safe, with save/restore of the bytes after the
 * match when the next literals could be clobbered.
 * @return number of output bytes produced, or an error code. */
static size_t ZSTD_execSequence(BYTE* op,
                                seq_t sequence,
                                const BYTE** litPtr, const BYTE* const litLimit,
                                BYTE* const base, BYTE* const oend)
{
    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
    const BYTE* const ostart = op;
    const size_t litLength = sequence.litLength;
    BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
    const BYTE* const litEnd = *litPtr + litLength;

    /* check */
    if (endMatch > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
    if (litEnd > litLimit) return ERROR(corruption_detected);
    if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall);    /* overwrite literal segment */

    /* copy Literals : wildcopy only when 8-byte overruns cannot touch
     * unread literals or run past the output buffer */
    if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
        memmove(op, *litPtr, litLength);   /* overwrite risk */
    else
        ZSTD_wildcopy(op, *litPtr, litLength);
    op += litLength;
    *litPtr = litEnd;   /* update for next sequence */

    /* check : last match must be at a minimum distance of 8 from end of dest buffer */
    if (oend-op < 8) return ERROR(dstSize_tooSmall);

    /* copy Match */
    {
        const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);
        const BYTE* match = op - sequence.offset;   /* possible underflow at op - offset ? */
        size_t qutt = 12;
        U64 saved[2];

        /* check */
        if (match < base) return ERROR(corruption_detected);
        /* NOTE(review): compares offset against the pointer value of `base`,
         * as in the original v0.1 sources — kept verbatim */
        if (sequence.offset > (size_t)base) return ERROR(corruption_detected);

        /* save beginning of literal sequence, in case of write overlap */
        if (overlapRisk)
        {
            if ((endMatch + qutt) > oend) qutt = oend-endMatch;
            memcpy(saved, endMatch, qutt);
        }

        if (sequence.offset < 8)
        {
            /* close-range match : copy byte-wise then re-spread so the
             * 8-byte copies below see a repeatable pattern */
            const int dec64 = dec64table[sequence.offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[sequence.offset];
            ZSTD_copy4(op+4, match);
            match -= dec64;
        } else { ZSTD_copy8(op, match); }
        op += 8; match += 8;

        if (endMatch > oend-(16-MINMATCH))
        {
            /* too close to the end for wildcopy : finish byte-wise */
            if (op < oend-8)
            {
                ZSTD_wildcopy(op, match, (oend-8) - op);
                match += (oend-8) - op;
                op = oend-8;
            }
            while (op<endMatch) *op++ = *match++;
        }
        else
            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */

        /* restore, in case of overlap */
        if (overlapRisk) memcpy(endMatch, saved, qutt);
    }

    return endMatch-ostart;
}
|
1825
|
+
|
1826
|
+
/* Decompression context for the v0.1 legacy format. */
typedef struct ZSTDv01_Dctx_s
{
    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];    /* literal-length FSE decoding table */
    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];  /* offset FSE decoding table */
    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];    /* match-length FSE decoding table */
    void* previousDstEnd;  /* end of output written by the previous streaming call (contiguity check) */
    void* base;            /* start of the output region; lower bound for match back-references */
    size_t expected;       /* input bytes the next streaming call must receive */
    blockType_t bType;     /* type of the block currently awaiting its content */
    U32 phase;             /* streaming state : 0 = frame header, 1 = block header, 2 = block content */
} dctx_t;
|
1837
|
+
|
1838
|
+
|
1839
|
+
/* Regenerate the block content from its sequences section plus the
 * previously-decoded literals : build the three FSE tables, replay every
 * sequence, then flush the trailing literal segment.
 * @return number of bytes written to dst, or an error code. */
static size_t ZSTD_decompressSequences(
                               void* ctx,
                               void* dst, size_t maxDstSize,
                               const void* seqStart, size_t seqSize,
                               const BYTE* litStart, size_t litSize)
{
    dctx_t* dctx = (dctx_t*)ctx;
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t errorCode, dumpsLength;
    const BYTE* litPtr = litStart;
    const BYTE* const litEnd = litStart + litSize;
    int nbSeq;
    const BYTE* dumps;
    U32* DTableLL = dctx->LLTable;
    U32* DTableML = dctx->MLTable;
    U32* DTableOffb = dctx->OffTable;
    BYTE* const base = (BYTE*) (dctx->base);

    /* Build Decoding Tables */
    errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
                                      DTableLL, DTableML, DTableOffb,
                                      ip, iend-ip);
    if (ZSTDv01_isError(errorCode)) return errorCode;
    ip += errorCode;

    /* Regen sequences */
    {
        seq_t sequence;
        seqState_t seqState;

        memset(&sequence, 0, sizeof(sequence));
        seqState.dumps = dumps;
        seqState.dumpsEnd = dumps + dumpsLength;
        seqState.prevOffset = 1;   /* initial repeat-offset */
        errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);
        if (FSE_isError(errorCode)) return ERROR(corruption_detected);
        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);

        /* replay sequences until the bitstream or the sequence count runs out */
        for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )
        {
            size_t oneSeqSize;
            nbSeq--;
            ZSTD_decodeSequence(&sequence, &seqState);
            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
            if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* check if reached exact end */
        if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */

        /* last literal segment : whatever literals remain after the final sequence */
        {
            size_t lastLLSize = litEnd - litPtr;
            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
            if (op != litPtr) memmove(op, litPtr, lastLLSize);   /* memmove : regions may overlap */
            op += lastLLSize;
        }
    }

    return op-ostart;
}
|
1908
|
+
|
1909
|
+
|
1910
|
+
/* Decompress one bt_compressed block : first decode its literals sub-block,
 * then replay the sequences section over them.
 * blockType == blockCompressed, srcSize is trusted by the caller.
 * @return number of bytes written to dst, or an error code. */
static size_t ZSTD_decompressBlock(
                            void* ctx,
                            void* dst, size_t maxDstSize,
                            const void* src, size_t srcSize)
{
    const BYTE* litPtr = NULL;
    size_t litSize = 0;

    /* Decode literals sub-block; on success, litCSize is the input consumed */
    const size_t litCSize = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);
    if (ZSTDv01_isError(litCSize)) return litCSize;

    /* The remainder of the block is the sequences section */
    return ZSTD_decompressSequences(ctx, dst, maxDstSize,
                                    (const BYTE*)src + litCSize, srcSize - litCSize,
                                    litPtr, litSize);
}
|
1929
|
+
|
1930
|
+
|
1931
|
+
/* One-shot decompression of a whole v0.1 frame using an existing context :
 * validate the magic number, then decode blocks until bt_end.
 * @return total number of bytes written to dst, or an error code. */
size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* iend = ip + srcSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t remainingSize = srcSize;
    U32 magicNumber;
    size_t errorCode=0;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    magicNumber = ZSTD_readBE32(src);
    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);
        if (ZSTDv01_isError(blockSize)) return blockSize;

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (blockSize > remainingSize) return ERROR(srcSize_wrong);

        switch(blockProperties.blockType)
        {
        case bt_compressed:
            errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);
            break;
        case bt_raw :
            errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet supported */
            break;
        case bt_end :
            /* end of frame : no data should remain after the end marker */
            if (remainingSize) return ERROR(srcSize_wrong);
            break;
        default:
            return ERROR(GENERIC);
        }
        /* bt_end carries blockSize 0 : exit before the error/advance bookkeeping */
        if (blockSize == 0) break;   /* bt_end */

        if (ZSTDv01_isError(errorCode)) return errorCode;
        op += errorCode;   /* errorCode holds the decoded size on success */
        ip += blockSize;
        remainingSize -= blockSize;
    }

    return op-ostart;
}
|
1987
|
+
|
1988
|
+
/* Convenience one-shot entry point : runs a whole frame through a
 * stack-allocated context. */
size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    dctx_t dctx;
    dctx.base = dst;   /* match back-references are bounded by the start of dst */
    return ZSTDv01_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
}
|
1994
|
+
|
1995
|
+
|
1996
|
+
/*******************************
|
1997
|
+
* Streaming Decompression API
|
1998
|
+
*******************************/
|
1999
|
+
|
2000
|
+
/* Reinitialize a streaming context : next call must deliver the frame header.
 * @return 0 (never fails). */
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)
{
    dctx->phase = 0;                         /* phase 0 : awaiting frame header */
    dctx->expected = ZSTD_frameHeaderSize;
    dctx->base = NULL;
    dctx->previousDstEnd = NULL;
    return 0;
}
|
2008
|
+
|
2009
|
+
ZSTDv01_Dctx* ZSTDv01_createDCtx(void)
|
2010
|
+
{
|
2011
|
+
ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));
|
2012
|
+
if (dctx==NULL) return NULL;
|
2013
|
+
ZSTDv01_resetDCtx(dctx);
|
2014
|
+
return dctx;
|
2015
|
+
}
|
2016
|
+
|
2017
|
+
/* Release a context created by ZSTDv01_createDCtx().
 * Accepts NULL (free(NULL) is a no-op). @return 0. */
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)
{
    free(dctx);
    return 0;
}
|
2022
|
+
|
2023
|
+
/* Report how many input bytes the next ZSTDv01_decompressContinue() call
 * expects to receive. */
size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)
{
    const dctx_t* const ctx = (const dctx_t*)dctx;
    return ctx->expected;
}
|
2027
|
+
|
2028
|
+
/* Streaming decompression step. The caller must feed exactly the number of
 * bytes announced by ZSTDv01_nextSrcSizeToDecompress(). Three phases :
 *   0 : consume the frame header (magic number),
 *   1 : consume a block header and announce the block's content size,
 *   2 : consume and decode the block content.
 * @return number of bytes written to dst for this step (0 for header steps),
 *         or an error code. */
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    dctx_t* ctx = (dctx_t*)dctx;

    /* Sanity check */
    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
    if (dst != ctx->previousDstEnd)  /* not contiguous */
        ctx->base = dst;   /* restart the back-reference window at dst */

    /* Decompress : frame header */
    if (ctx->phase == 0)
    {
        /* Check frame magic header */
        U32 magicNumber = ZSTD_readBE32(src);
        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        return 0;
    }

    /* Decompress : block header */
    if (ctx->phase == 1)
    {
        blockProperties_t bp;
        size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
        if (ZSTDv01_isError(blockSize)) return blockSize;
        if (bp.blockType == bt_end)
        {
            /* frame finished : expect nothing further */
            ctx->expected = 0;
            ctx->phase = 0;
        }
        else
        {
            /* announce the content size; decode it on the next call */
            ctx->expected = blockSize;
            ctx->bType = bp.blockType;
            ctx->phase = 2;
        }

        return 0;
    }

    /* Decompress : block content */
    {
        size_t rSize;
        switch(ctx->bType)
        {
        case bt_compressed:
            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
            break;
        case bt_raw :
            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet handled */
            break;
        case bt_end :   /* should never happen (filtered at phase 1) */
            rSize = 0;
            break;
        default:
            return ERROR(GENERIC);
        }
        /* back to phase 1 : a block header comes next */
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
        return rSize;
    }

}
|