extzstd 0.3.1 → 0.3.2
- checksums.yaml +4 -4
- data/README.md +28 -14
- data/contrib/zstd/CHANGELOG +114 -56
- data/contrib/zstd/CONTRIBUTING.md +14 -0
- data/contrib/zstd/Makefile +37 -31
- data/contrib/zstd/README.md +6 -0
- data/contrib/zstd/appveyor.yml +4 -1
- data/contrib/zstd/lib/Makefile +231 -134
- data/contrib/zstd/lib/README.md +28 -0
- data/contrib/zstd/lib/common/bitstream.h +24 -15
- data/contrib/zstd/lib/common/compiler.h +116 -3
- data/contrib/zstd/lib/common/cpu.h +0 -2
- data/contrib/zstd/lib/common/debug.h +11 -18
- data/contrib/zstd/lib/common/entropy_common.c +188 -42
- data/contrib/zstd/lib/common/error_private.c +1 -0
- data/contrib/zstd/lib/common/error_private.h +1 -1
- data/contrib/zstd/lib/common/fse.h +38 -11
- data/contrib/zstd/lib/common/fse_decompress.c +123 -16
- data/contrib/zstd/lib/common/huf.h +26 -5
- data/contrib/zstd/lib/common/mem.h +66 -93
- data/contrib/zstd/lib/common/pool.c +22 -16
- data/contrib/zstd/lib/common/pool.h +1 -1
- data/contrib/zstd/lib/common/threading.c +6 -5
- data/contrib/zstd/lib/common/xxhash.c +18 -56
- data/contrib/zstd/lib/common/xxhash.h +1 -1
- data/contrib/zstd/lib/common/zstd_common.c +9 -9
- data/contrib/zstd/lib/common/zstd_deps.h +111 -0
- data/contrib/zstd/lib/common/zstd_errors.h +1 -0
- data/contrib/zstd/lib/common/zstd_internal.h +89 -58
- data/contrib/zstd/lib/compress/fse_compress.c +30 -23
- data/contrib/zstd/lib/compress/hist.c +26 -28
- data/contrib/zstd/lib/compress/hist.h +1 -1
- data/contrib/zstd/lib/compress/huf_compress.c +210 -95
- data/contrib/zstd/lib/compress/zstd_compress.c +1339 -409
- data/contrib/zstd/lib/compress/zstd_compress_internal.h +119 -41
- data/contrib/zstd/lib/compress/zstd_compress_literals.c +4 -4
- data/contrib/zstd/lib/compress/zstd_compress_sequences.c +17 -3
- data/contrib/zstd/lib/compress/zstd_compress_superblock.c +23 -19
- data/contrib/zstd/lib/compress/zstd_cwksp.h +60 -24
- data/contrib/zstd/lib/compress/zstd_double_fast.c +22 -22
- data/contrib/zstd/lib/compress/zstd_fast.c +19 -19
- data/contrib/zstd/lib/compress/zstd_lazy.c +351 -77
- data/contrib/zstd/lib/compress/zstd_lazy.h +20 -0
- data/contrib/zstd/lib/compress/zstd_ldm.c +59 -18
- data/contrib/zstd/lib/compress/zstd_ldm.h +6 -0
- data/contrib/zstd/lib/compress/zstd_opt.c +190 -45
- data/contrib/zstd/lib/compress/zstdmt_compress.c +74 -406
- data/contrib/zstd/lib/compress/zstdmt_compress.h +26 -108
- data/contrib/zstd/lib/decompress/huf_decompress.c +302 -200
- data/contrib/zstd/lib/decompress/zstd_ddict.c +8 -8
- data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
- data/contrib/zstd/lib/decompress/zstd_decompress.c +125 -80
- data/contrib/zstd/lib/decompress/zstd_decompress_block.c +145 -37
- data/contrib/zstd/lib/decompress/zstd_decompress_block.h +5 -2
- data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +11 -10
- data/contrib/zstd/lib/dictBuilder/cover.c +29 -20
- data/contrib/zstd/lib/dictBuilder/cover.h +1 -1
- data/contrib/zstd/lib/dictBuilder/fastcover.c +20 -19
- data/contrib/zstd/lib/dictBuilder/zdict.c +15 -16
- data/contrib/zstd/lib/dictBuilder/zdict.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v01.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v02.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v03.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v04.c +6 -2
- data/contrib/zstd/lib/legacy/zstd_v05.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v06.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v07.c +5 -1
- data/contrib/zstd/lib/libzstd.pc.in +3 -3
- data/contrib/zstd/lib/zstd.h +348 -47
- data/ext/extzstd.c +6 -0
- data/ext/extzstd.h +6 -0
- data/gemstub.rb +3 -21
- data/lib/extzstd.rb +0 -2
- data/lib/extzstd/version.rb +6 -1
- data/test/test_basic.rb +0 -5
- metadata +5 -4
data/contrib/zstd/lib/common/zstd_errors.h:

@@ -77,6 +77,7 @@ typedef enum {
   ZSTD_error_frameIndex_tooLarge = 100,
   ZSTD_error_seekableIO = 102,
   ZSTD_error_dstBuffer_wrong = 104,
+  ZSTD_error_srcBuffer_wrong = 105,
   ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
 } ZSTD_ErrorCode;
 
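The only change to zstd_errors.h is the new ZSTD_error_srcBuffer_wrong code (reported, for example, when a stable input buffer is misused). As a rough sketch that is not part of this diff, an application could detect it through the existing stable helpers ZSTD_isError(), ZSTD_getErrorCode() and ZSTD_getErrorString(); the report_result() wrapper below is made up for illustration:

```c
#include <stdio.h>
#include <zstd.h>
#include <zstd_errors.h>

/* Hypothetical helper: classify the result of any ZSTD_* call. */
static void report_result(size_t ret)
{
    if (ZSTD_isError(ret)) {
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);
        if (code == ZSTD_error_srcBuffer_wrong) {
            fprintf(stderr, "source buffer misuse (e.g. a stable in-buffer changed)\n");
        } else {
            fprintf(stderr, "zstd error: %s\n", ZSTD_getErrorString(code));
        }
    }
}
```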
data/contrib/zstd/lib/common/zstd_internal.h:

@@ -19,7 +19,7 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
-#
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
 #include <arm_neon.h>
 #endif
 #include "compiler.h"
@@ -139,7 +139,7 @@ void _force_has_format_string(const char *format, ...) {
 
 #define ZSTD_REP_NUM 3 /* number of repcodes */
 #define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
-static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
 
 #define KB *(1 <<10)
 #define MB *(1 <<20)
@@ -153,13 +153,13 @@ static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
 #define BIT0 1
 
 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
-static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
 
 #define ZSTD_FRAMEIDSIZE 4 /* magic number size */
 
 #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
 typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
 
 #define ZSTD_FRAMECHECKSUMSIZE 4
@@ -186,61 +186,75 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
 #define OffFSELog 8
 #define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
 
-
-
-
-
-
-
-
-
-
-
+#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
+/* Each table cannot take more than #symbols * FSELog bits */
+#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
+
+static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     1, 1, 1, 1, 2, 2, 3, 3,
+     4, 6, 7, 8, 9,10,11,12,
+    13,14,15,16
+};
+static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
+     4, 3, 2, 2, 2, 2, 2, 2,
+     2, 2, 2, 2, 2, 1, 1, 1,
+     2, 2, 2, 2, 2, 2, 2, 2,
+     2, 3, 2, 1, 1, 1, 1, 1,
+    -1,-1,-1,-1
+};
 #define LL_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static const U32 ML_bits[MaxML+1] = {
-
-
-
-
-
-
-
-
-
-
-
-
-
+static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0,
+     1, 1, 1, 1, 2, 2, 3, 3,
+     4, 4, 5, 7, 8, 9,10,11,
+    12,13,14,15,16
+};
+static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
+     1, 4, 3, 2, 2, 2, 2, 2,
+     2, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1,-1,-1,
+    -1,-1,-1,-1,-1
+};
 #define ML_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static const S16 OF_defaultNorm[DefaultMaxOff+1] = {
-
-
-
+static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
+     1, 1, 1, 1, 1, 1, 2, 2,
+     2, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1,
+    -1,-1,-1,-1,-1
+};
 #define OF_DEFAULTNORMLOG 5 /* for static allocation */
-static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
 
 
 /*-*******************************************
 *  Shared functions to include for inlining
 *********************************************/
 static void ZSTD_copy8(void* dst, const void* src) {
-#
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
     vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
 #else
-
+    ZSTD_memcpy(dst, src, 8);
 #endif
 }
 
 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
 static void ZSTD_copy16(void* dst, const void* src) {
-#
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
     vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
 #else
-
+    ZSTD_memcpy(dst, src, 16);
 #endif
 }
 #define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
@@ -255,13 +269,13 @@ typedef enum {
 } ZSTD_overlap_e;
 
 /*! ZSTD_wildcopy() :
- *  Custom version of
+ *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
-MEM_STATIC FORCE_INLINE_ATTR
+MEM_STATIC FORCE_INLINE_ATTR
 void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
 {
     ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
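The updated comment spells out ZSTD_wildcopy()'s overlap contract. Purely as a sketch of that contract (this is an internal API, so the snippet assumes zstd_internal.h is on the include path; demo_overlap_copy() and the sizes are made up):

```c
#include "zstd_internal.h"   /* internal header, only visible when building the library */

/* Copy 16 bytes within one buffer while honouring the documented contract:
 * src sits before dst, they are at least 8 bytes apart, and the buffer has
 * WILDCOPY_OVERLENGTH bytes of slack because wildcopy may overwrite past the end. */
static void demo_overlap_copy(BYTE* buf /* >= 8 + 16 + WILDCOPY_OVERLENGTH bytes */)
{
    ZSTD_wildcopy(buf + 8, buf, 16, ZSTD_overlap_src_before_dst);
}
```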
@@ -284,14 +298,16 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
      * one COPY16() in the first call. Then, do two calls per loop since
      * at that point it is more likely to have a high trip count.
      */
-#
+#ifdef __aarch64__
     do {
         COPY16(op, ip);
     }
     while (op < oend);
 #else
-
-    if (
+    ZSTD_copy16(op, ip);
+    if (16 >= length) return;
+    op += 16;
+    ip += 16;
     do {
         COPY16(op, ip);
         COPY16(op, ip);
@@ -305,7 +321,7 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src,
 {
     size_t const length = MIN(dstCapacity, srcSize);
     if (length > 0) {
-
+        ZSTD_memcpy(dst, src, length);
     }
     return length;
 }
@@ -320,28 +336,39 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src,
  * In which case, resize it down to free some memory */
 #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
 
+/* Controls whether the input/output buffer is buffered or stable. */
+typedef enum {
+    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
+    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
+} ZSTD_bufferMode_e;
+
 
 /*-*******************************************
 *  Private declarations
 *********************************************/
 typedef struct seqDef_s {
-    U32 offset;
+    U32 offset;         /* Offset code of the sequence */
     U16 litLength;
     U16 matchLength;
 } seqDef;
 
 typedef struct {
     seqDef* sequencesStart;
-    seqDef* sequences;
+    seqDef* sequences;      /* ptr to end of sequences */
     BYTE* litStart;
-    BYTE* lit;
+    BYTE* lit;              /* ptr to end of literals */
     BYTE* llCode;
     BYTE* mlCode;
     BYTE* ofCode;
     size_t maxNbSeq;
     size_t maxNbLit;
-
-
+
+    /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength
+     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+     * the existing value of the litLength or matchLength by 0x10000.
+     */
+    U32 longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
+    U32 longLengthPos;  /* Index of the sequence to apply long length modification to */
 } seqStore_t;
 
 typedef struct {
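The new longLengthID/longLengthPos comments describe how a single litLength or matchLength larger than a U16 is represented in the seqStore. A small sketch of the reader side of that convention (read_lengths() is a made-up helper, not library code; the types come from zstd_internal.h):

```c
/* Undo the longLength convention described above when reading sequence i
 * back out of a seqStore_t. */
static void read_lengths(const seqStore_t* ss, U32 i, U32* litLen, U32* matchLen)
{
    const seqDef* const seq = ss->sequencesStart + i;
    *litLen   = seq->litLength;
    *matchLen = seq->matchLength;
    if (ss->longLengthPos == i) {
        if (ss->longLengthID == 1) *litLen   += 0x10000;  /* long literal run */
        if (ss->longLengthID == 2) *matchLen += 0x10000;  /* long match */
    }
}
```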
@@ -384,9 +411,9 @@ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBu
 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
 
 /* custom memory allocation functions */
-void*
-void*
-void
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
 
 
 MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
@@ -394,8 +421,12 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
     assert(val != 0);
     {
 #   if defined(_MSC_VER) /* Visual */
-
-
+#     if STATIC_BMI2 == 1
+        return _lzcnt_u32(val)^31;
+#     else
+        unsigned long r=0;
+        return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+#     endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
     return __builtin_clz (val) ^ 31;
 #   elif defined(__ICCARM__) /* IAR Intrinsic */
data/contrib/zstd/lib/compress/fse_compress.c:

@@ -15,8 +15,6 @@
 /* **************************************************************
 *  Includes
 ****************************************************************/
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memcpy, memset */
 #include "../common/compiler.h"
 #include "../common/mem.h" /* U32, U16, etc. */
 #include "../common/debug.h" /* assert, DEBUGLOG */
@@ -25,6 +23,9 @@
 #define FSE_STATIC_LINKING_ONLY
 #include "../common/fse.h"
 #include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
 
 
 /* **************************************************************
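zstd_deps.h (new in this release) replaces the removed <stdlib.h>/<string.h> includes; a translation unit opts into optional dependency groups by defining the matching ZSTD_DEPS_NEED_* macro before including it, exactly as this hunk does. A minimal sketch of the same pattern (the halve() helper is illustrative only):

```c
/* Request the optional groups first, then include the header once.
 * ZSTD_DEPS_NEED_MALLOC provides ZSTD_malloc/ZSTD_free,
 * ZSTD_DEPS_NEED_MATH64 provides ZSTD_div64;
 * ZSTD_memcpy/ZSTD_memset are always available. */
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
#include "../common/zstd_deps.h"

static unsigned long long halve(unsigned long long v) { return ZSTD_div64(v, 2u); }
```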
@@ -74,13 +75,15 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
     FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
     U32 const step = FSE_TABLESTEP(tableSize);
-    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
 
-
+    U32* cumul = (U32*)workSpace;
+    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+
     U32 highThreshold = tableSize-1;
 
+    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
     /* CTable header */
-    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
     tableU16[-2] = (U16) tableLog;
     tableU16[-1] = (U16) maxSymbolValue;
     assert(tableLog < 16); /* required for threshold strategy to work */
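FSE_buildCTable_wksp() now carves its cumul and tableSymbol scratch arrays out of the caller-provided workspace and rejects workspaces that are unaligned or smaller than the FSE_BUILD_CTABLE_WORKSPACE_SIZE() macro it checks against. A sketch of a conforming caller; it assumes the bundled fse.h is on the include path, that FSE_STATIC_LINKING_ONLY exposes that macro and the prototype, and build_ctable()/norm are illustrative names:

```c
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"

static size_t build_ctable(FSE_CTable* ct, const short* norm,
                           unsigned maxSymbolValue, unsigned tableLog)
{
    /* An unsigned array is naturally 4-byte aligned; size it with the same
     * macro the new check uses, at the library maxima for simplicity. */
    static unsigned wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE(FSE_MAX_SYMBOL_VALUE, FSE_MAX_TABLELOG) / sizeof(unsigned)];
    return FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, wksp, sizeof(wksp));
}
```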
@@ -89,7 +92,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
      * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
 
 #ifdef __clang_analyzer__
-
+    ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
 #endif
 
     /* symbol start positions */
@@ -168,12 +171,13 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     return 0;
 }
 
-
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
 size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
 {
     FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */
     return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
 }
+#endif
 
 
 
@@ -307,10 +311,10 @@ FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
     size_t size;
     if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
     size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-    return (FSE_CTable*)
+    return (FSE_CTable*)ZSTD_malloc(size);
 }
 
-void FSE_freeCTable (FSE_CTable* ct) {
+void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
 
 /* provides the minimum logSize to safely represent a distribution */
 static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
@@ -341,11 +345,10 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
     return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
 }
 
-
 /* Secondary normalization method.
    To be used when primary method fails. */
 
-static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
 {
     short const NOT_YET_ASSIGNED = -2;
     U32 s;
@@ -362,7 +365,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
             continue;
         }
         if (count[s] <= lowThreshold) {
-            norm[s] =
+            norm[s] = lowProbCount;
             distributed++;
             total -= count[s];
             continue;
@@ -414,7 +417,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
 
     {   U64 const vStepLog = 62 - tableLog;
         U64 const mid = (1ULL << (vStepLog-1)) - 1;
-        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid)
+        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
         U64 tmpTotal = mid;
         for (s=0; s<=maxSymbolValue; s++) {
             if (norm[s]==NOT_YET_ASSIGNED) {
@@ -431,10 +434,9 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
     return 0;
 }
 
-
 size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                            const unsigned* count, size_t total,
-                           unsigned maxSymbolValue)
+                           unsigned maxSymbolValue, unsigned useLowProbCount)
 {
     /* Sanity checks */
     if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
@@ -443,8 +445,9 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
     if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
 
     {   static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+        short const lowProbCount = useLowProbCount ? -1 : 1;
         U64 const scale = 62 - tableLog;
-        U64 const step = ((U64)1<<62)
+        U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
         U64 const vStep = 1ULL<<(scale-20);
         int stillToDistribute = 1<<tableLog;
         unsigned s;
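FSE_normalizeCount() gains a trailing useLowProbCount flag: when set, symbols at or below the low threshold are normalized to -1 instead of 1 (see the lowProbCount assignment above). A sketch of the updated call, using the same srcSize >= 2048 heuristic this diff applies in FSE_compress_wksp() further down; normalize_counts() and its arguments are illustrative, and the bundled fse.h is assumed to be on the include path:

```c
#include "fse.h"   /* declares the new 6-argument FSE_normalizeCount() */

static size_t normalize_counts(short* norm, unsigned tableLog,
                               const unsigned* count, size_t srcSize,
                               unsigned maxSymbolValue)
{
    /* Larger inputs can afford the -1 "low probability" encoding. */
    return FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue,
                              /* useLowProbCount */ srcSize >= 2048);
}
```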
@@ -456,7 +459,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
             if (count[s] == total) return 0; /* rle special case */
             if (count[s] == 0) { normalizedCounter[s]=0; continue; }
             if (count[s] <= lowThreshold) {
-                normalizedCounter[s] =
+                normalizedCounter[s] = lowProbCount;
                 stillToDistribute--;
             } else {
                 short proba = (short)((count[s]*step) >> scale);
@@ -470,7 +473,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
     }   }
     if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
         /* corner case, need another normalization method */
-        size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+        size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
         if (FSE_isError(errorCode)) return errorCode;
     }
     else normalizedCounter[largest] += (short)stillToDistribute;
@@ -625,6 +628,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
 
 size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
 
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
 /* FSE_compress_wksp() :
  * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
  * `wkspSize` size must be `(1<<tableLog)`.
@@ -643,7 +647,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
     size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
 
     /* init conditions */
-    if (wkspSize <
+    if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
     if (srcSize <= 1) return 0; /* Not compressible */
     if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
     if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
@@ -656,7 +660,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
     }
 
     tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
-    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );
 
     /* Write table description header */
     {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
@@ -678,13 +682,16 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
 
 typedef struct {
     FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
-
+    union {
+        U32 hist_wksp[HIST_WKSP_SIZE_U32];
+        BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+    } workspace;
 } fseWkspMax_t;
 
 size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
 {
     fseWkspMax_t scratchBuffer;
-    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >=
+    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
     return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
 }
@@ -693,6 +700,6 @@ size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcS
 {
     return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
 }
-
+#endif
 
 #endif /* FSE_COMMONDEFS_ONLY */