extzstd 0.1.1 → 0.2
- checksums.yaml +5 -5
- data/HISTORY.ja.md +18 -0
- data/README.md +15 -50
- data/contrib/zstd/CONTRIBUTING.md +1 -1
- data/contrib/zstd/COPYING +339 -0
- data/contrib/zstd/Makefile +82 -51
- data/contrib/zstd/NEWS +92 -5
- data/contrib/zstd/README.md +50 -41
- data/contrib/zstd/appveyor.yml +164 -102
- data/contrib/zstd/circle.yml +10 -22
- data/contrib/zstd/lib/BUCK +31 -10
- data/contrib/zstd/lib/Makefile +57 -31
- data/contrib/zstd/lib/README.md +68 -37
- data/contrib/zstd/lib/common/bitstream.h +130 -76
- data/contrib/zstd/lib/common/compiler.h +86 -0
- data/contrib/zstd/lib/common/error_private.c +15 -11
- data/contrib/zstd/lib/common/error_private.h +8 -8
- data/contrib/zstd/lib/common/fse.h +19 -9
- data/contrib/zstd/lib/common/fse_decompress.c +3 -22
- data/contrib/zstd/lib/common/huf.h +68 -26
- data/contrib/zstd/lib/common/mem.h +23 -35
- data/contrib/zstd/lib/common/pool.c +123 -63
- data/contrib/zstd/lib/common/pool.h +19 -10
- data/contrib/zstd/lib/common/threading.c +11 -16
- data/contrib/zstd/lib/common/threading.h +52 -33
- data/contrib/zstd/lib/common/xxhash.c +28 -22
- data/contrib/zstd/lib/common/zstd_common.c +40 -27
- data/contrib/zstd/lib/common/zstd_errors.h +43 -34
- data/contrib/zstd/lib/common/zstd_internal.h +131 -123
- data/contrib/zstd/lib/compress/fse_compress.c +17 -33
- data/contrib/zstd/lib/compress/huf_compress.c +15 -9
- data/contrib/zstd/lib/compress/zstd_compress.c +2096 -2363
- data/contrib/zstd/lib/compress/zstd_compress_internal.h +462 -0
- data/contrib/zstd/lib/compress/zstd_double_fast.c +309 -0
- data/contrib/zstd/lib/compress/zstd_double_fast.h +29 -0
- data/contrib/zstd/lib/compress/zstd_fast.c +243 -0
- data/contrib/zstd/lib/compress/zstd_fast.h +31 -0
- data/contrib/zstd/lib/compress/zstd_lazy.c +765 -0
- data/contrib/zstd/lib/compress/zstd_lazy.h +39 -0
- data/contrib/zstd/lib/compress/zstd_ldm.c +707 -0
- data/contrib/zstd/lib/compress/zstd_ldm.h +68 -0
- data/contrib/zstd/lib/compress/zstd_opt.c +785 -0
- data/contrib/zstd/lib/compress/zstd_opt.h +19 -908
- data/contrib/zstd/lib/compress/zstdmt_compress.c +737 -327
- data/contrib/zstd/lib/compress/zstdmt_compress.h +88 -26
- data/contrib/zstd/lib/decompress/huf_decompress.c +158 -50
- data/contrib/zstd/lib/decompress/zstd_decompress.c +884 -699
- data/contrib/zstd/lib/deprecated/zbuff.h +5 -4
- data/contrib/zstd/lib/deprecated/zbuff_common.c +5 -5
- data/contrib/zstd/lib/deprecated/zbuff_compress.c +6 -4
- data/contrib/zstd/lib/deprecated/zbuff_decompress.c +5 -4
- data/contrib/zstd/lib/dictBuilder/cover.c +93 -77
- data/contrib/zstd/lib/dictBuilder/zdict.c +107 -92
- data/contrib/zstd/lib/dictBuilder/zdict.h +112 -102
- data/contrib/zstd/lib/legacy/zstd_legacy.h +9 -4
- data/contrib/zstd/lib/legacy/zstd_v01.c +7 -6
- data/contrib/zstd/lib/legacy/zstd_v01.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v02.c +27 -99
- data/contrib/zstd/lib/legacy/zstd_v02.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v03.c +26 -98
- data/contrib/zstd/lib/legacy/zstd_v03.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v04.c +22 -91
- data/contrib/zstd/lib/legacy/zstd_v04.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v05.c +23 -99
- data/contrib/zstd/lib/legacy/zstd_v05.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v06.c +22 -96
- data/contrib/zstd/lib/legacy/zstd_v06.h +5 -4
- data/contrib/zstd/lib/legacy/zstd_v07.c +19 -95
- data/contrib/zstd/lib/legacy/zstd_v07.h +5 -4
- data/contrib/zstd/lib/zstd.h +895 -271
- data/ext/extconf.rb +11 -2
- data/ext/extzstd.c +45 -128
- data/ext/extzstd.h +74 -31
- data/ext/extzstd_stream.c +401 -142
- data/ext/zstd_common.c +5 -0
- data/ext/zstd_compress.c +8 -0
- data/ext/zstd_decompress.c +1 -0
- data/ext/zstd_dictbuilder.c +2 -0
- data/lib/extzstd/version.rb +1 -1
- data/lib/extzstd.rb +48 -1
- data/test/test_basic.rb +9 -1
- metadata +17 -7
- data/HISTORY.ja +0 -10
- data/contrib/zstd/LICENSE-examples +0 -11
- data/contrib/zstd/PATENTS +0 -33
data/contrib/zstd/lib/compress/zstdmt_compress.h

@@ -1,10 +1,11 @@
-/**
+/*
  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree
- *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
  */

 #ifndef ZSTDMT_COMPRESS_H
@@ -15,31 +16,41 @@
 #endif


-/* Note :
- *
+/* Note : This is an internal API.
+ * Some methods are still exposed (ZSTDLIB_API),
+ * because it used to be the only way to invoke MT compression.
+ * Now, it's recommended to use ZSTD_compress_generic() instead.
+ * These methods will stop being exposed in a future version */

 /* === Dependencies === */
-#include <stddef.h>
+#include <stddef.h>   /* size_t */
 #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
-#include "zstd.h"
+#include "zstd.h"     /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */


-/* ===
-
+/* === Memory management === */
 typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
 ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
-ZSTDLIB_API
+ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads,
+                                                    ZSTD_customMem cMem);
+ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+
+ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+
+
+/* === Simple buffer-to-butter one-pass function === */
+
+ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                                       void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize,
+                                       int compressionLevel);

-ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx,
-                                       void* dst, size_t dstCapacity,
-                                       const void* src, size_t srcSize,
-                                       int compressionLevel);


 /* === Streaming functions === */

 ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it may change in the future, to mean "empty" */

 ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

@@ -49,26 +60,77 @@ ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);

 /* === Advanced functions and parameters === */

-#ifndef
-# define
+#ifndef ZSTDMT_JOBSIZE_MIN
+# define ZSTDMT_JOBSIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
 #endif

-ZSTDLIB_API size_t
-
+ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                            void* dst, size_t dstCapacity,
+                                            const void* src, size_t srcSize,
+                                            const ZSTD_CDict* cdict,
+                                            ZSTD_parameters const params,
+                                            unsigned overlapLog);
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+                                        const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
+                                        ZSTD_parameters params,
+                                        unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+                                        const ZSTD_CDict* cdict,
+                                        ZSTD_frameParameters fparams,
+                                        unsigned long long pledgedSrcSize);  /* note : zero means empty */

-/*
+/* ZSTDMT_parameter :
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
-
-    ZSTDMT_p_overlapSectionLog
-}
+    ZSTDMT_p_jobSize,           /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */
+} ZSTDMT_parameter;

 /* ZSTDMT_setMTCtxParameter() :
  * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
- * The function must be called typically after ZSTD_createCCtx()
+ * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
  * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx,
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
+
+
+/*! ZSTDMT_compressStream_generic() :
+ *  Combines ZSTDMT_compressStream() with ZSTDMT_flushStream() or ZSTDMT_endStream()
+ *  depending on flush directive.
+ * @return : minimum amount of data still to be flushed
+ *           0 if fully flushed
+ *           or an error code */
+ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+                                                  ZSTD_outBuffer* output,
+                                                  ZSTD_inBuffer* input,
+                                                  ZSTD_EndDirective endOp);
+
+
+/* === Private definitions; never ever use directly === */
+
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
+
+/* ZSTDMT_CCtxParam_setNbThreads()
+ * Set nbThreads, and clamp it correctly,
+ * also reset jobSize and overlapLog */
+size_t ZSTDMT_CCtxParam_setNbThreads(ZSTD_CCtx_params* params, unsigned nbThreads);
+
+/* ZSTDMT_getNbThreads():
+ * @return nb threads currently active in mtctx.
+ * mtctx must be valid */
+size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx);
+
+/*! ZSTDMT_initCStream_internal() :
+ *  Private use only. Init streaming operation.
+ *  expects params to be valid.
+ *  must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+                    const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
+                    const ZSTD_CDict* cdict,
+                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);


 #if defined (__cplusplus)
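The hunks above expose a multi-threaded compression API in the bundled zstd. Below is a minimal sketch, not taken from the gem or from upstream zstd, of how the ZSTDMT streaming entry points declared above are typically driven (create a context, init, feed input, then flush with ZSTDMT_endStream). The thread count, compression level, and helper name are arbitrary choices for illustration, dstCapacity is assumed to be at least ZSTD_compressBound(srcSize) so the flush loop can finish, and the header itself says new code should prefer ZSTD_compress_generic().

#include <stddef.h>
#include "zstdmt_compress.h"   /* also pulls in zstd.h with ZSTD_STATIC_LINKING_ONLY */

/* Hypothetical helper: compress src into dst with 4 worker threads.
 * Returns the compressed size, or an error code testable with ZSTD_isError(). */
static size_t compress_buffer_mt(const void* src, size_t srcSize,
                                 void* dst, size_t dstCapacity)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t err;

    if (mtctx == NULL)
        return (size_t)-1;   /* largest size_t value; recognized as an error by ZSTD_isError() */

    err = ZSTDMT_initCStream(mtctx, 3);                 /* compression level 3 */
    if (!ZSTD_isError(err))
        err = ZSTDMT_compressStream(mtctx, &out, &in);  /* feed the whole input */
    if (!ZSTD_isError(err)) {
        size_t remaining;
        do {   /* flush the worker jobs and write the frame epilogue */
            remaining = ZSTDMT_endStream(mtctx, &out);
        } while (!ZSTD_isError(remaining) && remaining > 0);
        err = remaining;
    }
    ZSTDMT_freeCCtx(mtctx);
    return ZSTD_isError(err) ? err : out.pos;
}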
data/contrib/zstd/lib/decompress/huf_decompress.c

@@ -32,41 +32,31 @@
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
 ****************************************************************** */

-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-# define FORCE_INLINE static __forceinline
-# pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
-#else
-# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#  ifdef __GNUC__
-#   define FORCE_INLINE static inline __attribute__((always_inline))
-#  else
-#   define FORCE_INLINE static inline
-#  endif
-# else
-#  define FORCE_INLINE static
-# endif /* __STDC_VERSION__ */
-#endif
-
-
 /* **************************************************************
 * Dependencies
 ****************************************************************/
 #include <string.h>     /* memcpy, memset */
 #include "bitstream.h"  /* BIT_* */
+#include "compiler.h"
 #include "fse.h"        /* header compression */
 #define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
+#include "error_private.h"


 /* **************************************************************
 * Error Management
 ****************************************************************/
+#define HUF_isError ERR_isError
 #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
 /*-***************************/
 /* generic DTableDesc */
 /*-***************************/
@@ -87,16 +77,28 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)

 typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */

-size_t
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
 {
-    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
-    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
     U32 tableLog = 0;
     U32 nbSymbols = 0;
     size_t iSize;
     void* const dtPtr = DTable + 1;
     HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;

+    U32* rankVal;
+    BYTE* huffWeight;
+    size_t spaceUsed32 = 0;
+
+    rankVal = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+    if ((spaceUsed32 << 2) > wkspSize)
+        return ERROR(tableLog_tooLarge);
+    workSpace = (U32 *)workSpace + spaceUsed32;
+    wkspSize -= (spaceUsed32 << 2);
+
     HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
     /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */

@@ -135,6 +137,13 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
     return iSize;
 }

+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_readDTableX2_wksp(DTable, src, srcSize,
+                                 workSpace, sizeof(workSpace));
+}
+

 static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
 {
@@ -155,7 +164,7 @@ static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, con
     if (MEM_64bits()) \
         HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

-
+HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
 {
     BYTE* const pStart = p;

@@ -212,11 +221,13 @@ size_t HUF_decompress1X2_usingDTable(
     return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
 }

-size_t
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;

-    size_t const hSize =
+    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
@@ -224,6 +235,15 @@ size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, cons
     return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
 }

+
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
 size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
@@ -335,11 +355,14 @@ size_t HUF_decompress4X2_usingDTable(
 }


-size_t
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;

-    size_t const hSize =
+    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
+                                                workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
@@ -347,6 +370,13 @@ size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, cons
     return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
 }

+
+size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
 size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
@@ -403,7 +433,8 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
     } }
 }

-typedef U32
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];

 static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
                            const sortedSymbol_t* sortedList, const U32 sortedListSize,
@@ -447,20 +478,43 @@ static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
     }
 }

-size_t
+size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
+                             size_t srcSize, void* workSpace,
+                             size_t wkspSize)
 {
-    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
-    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
-    U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
-    U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
     U32 tableLog, maxW, sizeOfSort, nbSymbols;
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     U32 const maxTableLog = dtd.maxTableLog;
     size_t iSize;
     void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
     HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
+    U32 *rankStart;
+
+    rankValCol_t* rankVal;
+    U32* rankStats;
+    U32* rankStart0;
+    sortedSymbol_t* sortedSymbol;
+    BYTE* weightList;
+    size_t spaceUsed32 = 0;
+
+    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
+    rankStats = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_MAX + 1;
+    rankStart0 = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_MAX + 2;
+    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
+    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
+    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+    if ((spaceUsed32 << 2) > wkspSize)
+        return ERROR(tableLog_tooLarge);
+    workSpace = (U32 *)workSpace + spaceUsed32;
+    wkspSize -= (spaceUsed32 << 2);
+
+    rankStart = rankStart0 + 1;
+    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));

     HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
     if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
@@ -527,6 +581,12 @@ size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
     return iSize;
 }

+size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_readDTableX4_wksp(DTable, src, srcSize,
+                                 workSpace, sizeof(workSpace));
+}

 static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
 {
@@ -545,7 +605,8 @@ static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DE
     if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
         BIT_skipBits(DStream, dt[val].nbBits);
         if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-
+            /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+            DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
     } }
     return 1;
 }
@@ -562,7 +623,7 @@ static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DE
     if (MEM_64bits()) \
         ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

-
+HINT_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
 {
     BYTE* const pStart = p;

@@ -626,11 +687,14 @@ size_t HUF_decompress1X4_usingDTable(
     return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
 }

-size_t
+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;

-    size_t const hSize =
+    size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
+                                               workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
@@ -638,6 +702,15 @@ size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, cons
     return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
 }

+
+size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
 size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
@@ -748,11 +821,14 @@ size_t HUF_decompress4X4_usingDTable(
 }


-size_t
+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;

-    size_t hSize =
+    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
+                                         workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
@@ -760,6 +836,15 @@ size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, cons
     return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
 }

+
+size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
 size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
@@ -816,11 +901,11 @@ static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, qu
  *  Tells which decoder is likely to decode faster,
  *  based on a set of pre-determined metrics.
  *  @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
- *  Assumption : 0 < cSrcSize
+ *  Assumption : 0 < cSrcSize, dstSize <= 128 KB */
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
 {
     /* decoder timing evaluation */
-    U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16
+    U32 const Q = cSrcSize >= dstSize ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
     U32 const D256 = (U32)(dstSize >> 8);
     U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
     U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
@@ -861,19 +946,32 @@ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
     }
 }

-size_t HUF_decompress4X_hufOnly
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                         workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+                                     size_t dstSize, const void* cSrc,
+                                     size_t cSrcSize, void* workSpace,
+                                     size_t wkspSize)
 {
     /* validation checks */
     if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (
+    if (cSrcSize == 0) return ERROR(corruption_detected);

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ?
-
+        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
     }
 }

-size_t
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                  const void* cSrc, size_t cSrcSize,
+                                  void* workSpace, size_t wkspSize)
 {
     /* validation checks */
     if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -882,7 +980,17 @@ size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ?
-
+        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize):
+                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
     }
 }
+
+size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+                             const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                      workSpace, sizeof(workSpace));
+}