zstd-ruby 1.3.7.0 → 1.3.8.0
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/ext/zstdruby/libzstd/BUCK +15 -2
- data/ext/zstdruby/libzstd/Makefile +37 -2
- data/ext/zstdruby/libzstd/README.md +67 -41
- data/ext/zstdruby/libzstd/common/bitstream.h +2 -2
- data/ext/zstdruby/libzstd/common/compiler.h +19 -12
- data/ext/zstdruby/libzstd/common/cpu.h +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +22 -11
- data/ext/zstdruby/libzstd/common/error_private.c +6 -0
- data/ext/zstdruby/libzstd/common/fse.h +2 -2
- data/ext/zstdruby/libzstd/common/huf.h +25 -1
- data/ext/zstdruby/libzstd/common/pool.c +1 -1
- data/ext/zstdruby/libzstd/common/zstd_common.c +3 -1
- data/ext/zstdruby/libzstd/common/zstd_errors.h +1 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +11 -2
- data/ext/zstdruby/libzstd/compress/fse_compress.c +3 -3
- data/ext/zstdruby/libzstd/compress/hist.c +19 -11
- data/ext/zstdruby/libzstd/compress/hist.h +11 -8
- data/ext/zstdruby/libzstd/compress/huf_compress.c +33 -31
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +621 -371
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +90 -28
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +4 -4
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +15 -15
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +25 -18
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +18 -67
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +2 -6
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +133 -48
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +8 -0
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +229 -73
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +18 -10
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +178 -42
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +240 -0
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +44 -0
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +244 -1680
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1307 -0
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +59 -0
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +168 -0
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +13 -11
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +15 -15
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +28 -28
- data/ext/zstdruby/libzstd/dll/libzstd.def +0 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -10
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +15 -15
- data/ext/zstdruby/libzstd/zstd.h +1208 -968
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +7 -2
data/ext/zstdruby/libzstd/compress/zstdmt_compress.h

@@ -28,6 +28,16 @@
 #include "zstd.h"   /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */


+/* === Constants === */
+#ifndef ZSTDMT_NBWORKERS_MAX
+# define ZSTDMT_NBWORKERS_MAX 200
+#endif
+#ifndef ZSTDMT_JOBSIZE_MIN
+# define ZSTDMT_JOBSIZE_MIN (1 MB)
+#endif
+#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB))
+
+
 /* === Memory management === */
 typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
 ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
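
Aside, not part of the diff: the `(1 MB)` and `(512 MB)` spellings above rely on the size-suffix macros libzstd defines in zstd_internal.h. The sketch below shows the assumed expansion so the new job-size bounds can be read at a glance; these definitions come from the surrounding headers, not from this changeset.

```c
/* Assumed helpers from zstd_internal.h (not introduced by this diff):
 * a size literal is written as "<n> MB" and expands by simple juxtaposition,
 * e.g. (1 MB) -> (1 *(1 <<20)). */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

/* With those definitions:
 *   ZSTDMT_JOBSIZE_MIN == 1 MiB
 *   ZSTDMT_JOBSIZE_MAX == 512 MiB on 32-bit builds, 1024 MiB otherwise. */
```
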
@@ -52,6 +62,7 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
 ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
 ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */

+ZSTDLIB_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
 ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

 ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
@@ -60,16 +71,12 @@ ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);

 /* === Advanced functions and parameters === */

-#ifndef ZSTDMT_JOBSIZE_MIN
-# define ZSTDMT_JOBSIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
-#endif
-
 ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const ZSTD_CDict* cdict,
                                             ZSTD_parameters params,
-                                            unsigned overlapLog);
+                                            int overlapLog);

 ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                     const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
@@ -84,8 +91,9 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
 /* ZSTDMT_parameter :
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
-    ZSTDMT_p_jobSize,
-    ZSTDMT_p_overlapSectionLog
+    ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+    ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */
 } ZSTDMT_parameter;

 /* ZSTDMT_setMTCtxParameter() :
@@ -93,12 +101,12 @@ typedef enum {
  * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
  * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);

 /* ZSTDMT_getMTCtxParameter() :
  * Query the ZSTDMT_CCtx for a parameter value.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned* value);
+ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);


 /*! ZSTDMT_compressStream_generic() :
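
As a usage sketch (not taken from the gem itself), here is how the reworked `int`-valued setter and the `ZSTDMT_p_jobSize` / `ZSTDMT_p_overlapLog` / `ZSTDMT_p_rsyncable` parameters declared above would be driven. Error handling is reduced to the `ZSTD_isError()` check the header documents; the function name is illustrative.

```c
#include "zstdmt_compress.h"   /* declarations shown in the hunks above */

/* Sketch: enable rsyncable mode, a 4 MiB job size, and the default overlap.
 * Returns 0 on success or a zstd error code. */
static size_t configure_mtctx(ZSTDMT_CCtx* mtctx)
{
    size_t r;
    r = ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_jobSize, 4 << 20);   /* 4 MiB per job */
    if (ZSTD_isError(r)) return r;
    r = ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapLog, 6);      /* 6 == reload 1/8th of window */
    if (ZSTD_isError(r)) return r;
    return ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_rsyncable, 1);    /* new parameter in 1.3.8 */
}
```
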
@@ -129,7 +137,7 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);

 /*! ZSTDMT_CCtxParam_setMTCtxParameter()
  *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);

 /*! ZSTDMT_CCtxParam_setNbWorkers()
  *  Set nbWorkers, and clamp it.
data/ext/zstdruby/libzstd/decompress/huf_decompress.c

@@ -43,6 +43,19 @@
 #include "huf.h"
 #include "error_private.h"

+/* **************************************************************
+*  Macros
+****************************************************************/
+
+/* These two optional macros force the use one way or another of the two
+ * Huffman decompression implementations. You can't force in both directions
+ * at the same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+    defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+

 /* **************************************************************
 *  Error Management
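
A hedged illustration of how the new guard is meant to be used: exactly one of the two macros may be defined (typically on the compiler command line) to drop the other Huffman decoder from the build, and defining both now fails at compile time. The snippet is illustrative only; it is not part of the diff.

```c
/* Build-time selection sketch: the macro would normally come from CFLAGS
 * rather than a source file. */
#define HUF_FORCE_DECOMPRESS_X1        /* keep only the single-symbol (X1) decoder */
/* #define HUF_FORCE_DECOMPRESS_X2 */  /* defining this as well would trigger the
                                          new #error guard shown above */
```
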
@@ -58,6 +71,51 @@
 #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))


+/* **************************************************************
+*  BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn) \
+ \
+    static size_t fn##_default( \
+                  void* dst, size_t dstSize, \
+            const void* cSrc, size_t cSrcSize, \
+            const HUF_DTable* DTable) \
+    { \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+    } \
+ \
+    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
+                  void* dst, size_t dstSize, \
+            const void* cSrc, size_t cSrcSize, \
+            const HUF_DTable* DTable) \
+    { \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+    } \
+ \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+    { \
+        if (bmi2) { \
+            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+        } \
+        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+    }
+
+#else
+
+#define HUF_DGEN(fn) \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+    { \
+        (void)bmi2; \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+    }
+
+#endif
+
+
 /*-***************************/
 /* generic DTableDesc */
 /*-***************************/
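
The relocated `HUF_DGEN` macro implements a small runtime-dispatch pattern: one decoder body is compiled twice, once plainly and once with the BMI2 target attribute, and a thin wrapper picks between them from the `bmi2` flag. Below is a self-contained sketch of the same pattern with a toy `sum_body` instead of a Huffman decoder; all names here are illustrative, not from libzstd, and `__attribute__((target("bmi2")))` is what `TARGET_ATTRIBUTE("bmi2")` is assumed to expand to on GCC/Clang.

```c
#include <stddef.h>

static size_t sum_body(const unsigned char* p, size_t n)
{
    size_t s = 0, i;
    for (i = 0; i < n; i++) s += p[i];
    return s;
}

#if defined(__GNUC__) && defined(__x86_64__)
/* Recompile the same body with BMI2 code generation allowed. */
static __attribute__((target("bmi2"))) size_t sum_bmi2(const unsigned char* p, size_t n)
{
    return sum_body(p, n);
}
static size_t sum(const unsigned char* p, size_t n, int bmi2)
{
    if (bmi2) return sum_bmi2(p, n);   /* caller detected BMI2 at runtime */
    return sum_body(p, n);             /* portable fallback */
}
#else
static size_t sum(const unsigned char* p, size_t n, int bmi2)
{
    (void)bmi2;                        /* single variant, flag is ignored */
    return sum_body(p, n);
}
#endif

/* usage: size_t total = sum(buf, len, cpu_has_bmi2); */
```
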
@@ -71,6 +129,8 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
 }


+#ifndef HUF_FORCE_DECOMPRESS_X2
+
 /*-***************************/
 /* single-symbol decoding */
 /*-***************************/
@@ -307,46 +367,6 @@ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
                                                const void *cSrc,
                                                size_t cSrcSize,
                                                const HUF_DTable *DTable);
-#if DYNAMIC_BMI2
-
-#define HUF_DGEN(fn) \
- \
-    static size_t fn##_default( \
-                  void* dst, size_t dstSize, \
-            const void* cSrc, size_t cSrcSize, \
-            const HUF_DTable* DTable) \
-    { \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
-    } \
- \
-    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
-                  void* dst, size_t dstSize, \
-            const void* cSrc, size_t cSrcSize, \
-            const HUF_DTable* DTable) \
-    { \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
-    } \
- \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
-    { \
-        if (bmi2) { \
-            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
-        } \
-        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
-    }
-
-#else
-
-#define HUF_DGEN(fn) \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
-    { \
-        (void)bmi2; \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
-    }
-
-#endif

 HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
 HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
@@ -437,6 +457,10 @@ size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cS
     return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }

+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1

 /* *************************/
 /* double-symbols decoding */
@@ -911,6 +935,8 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
     return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }

+#endif /* HUF_FORCE_DECOMPRESS_X1 */
+

 /* ***********************************/
 /* Universal decompression selectors */
@@ -921,8 +947,18 @@ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
     return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
                            HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
 }

 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
@@ -930,11 +966,22 @@ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
     return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
                            HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
 }


+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
 typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
 static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
 {
@@ -956,6 +1003,7 @@ static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, qu
     {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
     {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
 };
+#endif

 /** HUF_selectDecoder() :
  *  Tells which decoder is likely to decode faster,
@@ -966,6 +1014,15 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
 {
     assert(dstSize > 0);
     assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dstSize;
+    (void)cSrcSize;
+    return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dstSize;
+    (void)cSrcSize;
+    return 1;
+#else
     /* decoder timing evaluation */
     {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
         U32 const D256 = (U32)(dstSize >> 8);
@@ -973,14 +1030,18 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
         U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
         DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
         return DTime1 < DTime0;
-}   }
+    }
+#endif
+}


 typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);

 size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
     static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
+#endif

     /* validation checks */
     if (dstSize == 0) return ERROR(dstSize_tooSmall);
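
To make the selector concrete, here is a standalone re-computation of the unforced path using the `Q == 15` row visible above (`{ 722,128}` for the single-symbol decoder, `{1891,145}` for the double-symbol one). The `DTime0` line mirrors the `DTime1` line shown in the hunk; the worked numbers are mine, the formulas follow the code.

```c
#include <stdio.h>

int main(void)
{
    /* With cSrcSize >= dstSize, Q saturates at 15 (the "93-99%" row). */
    unsigned const dstSize = 64 * 1024;
    unsigned const D256   = dstSize >> 8;          /* 256 */
    unsigned DTime0 = 722  + 128 * D256;           /* X1 estimate: 33490 */
    unsigned DTime1 = 1891 + 145 * D256;           /* X2 estimate: 39011 */
    DTime1 += DTime1 >> 3;                         /* cache-eviction penalty -> 43887 */
    /* 43887 < 33490 is false, so the selector returns 0: the X1 decoder. */
    printf("selected decoder: %u\n", DTime1 < DTime0 ? 1u : 0u);
    return 0;
}
```
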
@@ -989,7 +1050,17 @@ size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcS
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+#else
         return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+#endif
     }
 }

@@ -1002,8 +1073,18 @@ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#else
         return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
                         HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+#endif
     }
 }

@@ -1025,8 +1106,19 @@ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
     if (cSrcSize == 0) return ERROR(corruption_detected);

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                                    cSrcSize, workSpace, wkspSize):
                         HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#endif
     }
 }

@@ -1041,10 +1133,22 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
+#else
         return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize):
                         HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize);
+#endif
     }
 }

@@ -1060,10 +1164,21 @@ size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
     return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
                            HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
 }

+#ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
@@ -1075,12 +1190,23 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS

     return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
+#endif

 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
     return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
                            HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
 }

 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
@@ -1090,7 +1216,17 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
     if (cSrcSize == 0) return ERROR(corruption_detected);

     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
         return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
                         HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
     }
 }
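
All of the `*_bmi2` entry points above take an explicit `int bmi2` flag rather than probing the CPU themselves. As a hedged sketch (names assumed from libzstd's `cpu.h`, which the decompressor already includes), a caller would obtain that flag once and pass it down:

```c
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi2() */

/* Detect once, then forward the flag to the _bmi2 variants. */
static int detect_bmi2(void)
{
    return ZSTD_cpuid_bmi2(ZSTD_cpuid());   /* 1 if the CPU supports BMI2, else 0 */
}
```
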
data/ext/zstdruby/libzstd/decompress/zstd_ddict.c (new file)

@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of ZSTD_DDict object */
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <string.h>      /* memcpy, memmove, memset */
+#include "cpu.h"         /* bmi2 */
+#include "mem.h"         /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+#  include "zstd_legacy.h"
+#endif
+
+
+
+/*-*******************************************************
+*  Types
+*********************************************************/
+struct ZSTD_DDict_s {
+    void* dictBuffer;
+    const void* dictContent;
+    size_t dictSize;
+    ZSTD_entropyDTables_t entropy;
+    U32 dictID;
+    U32 entropyPresent;
+    ZSTD_customMem cMem;
+};  /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+    assert(ddict != NULL);
+    return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+    assert(ddict != NULL);
+    return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+    DEBUGLOG(4, "ZSTD_copyDDictParameters");
+    assert(dctx != NULL);
+    assert(ddict != NULL);
+    dctx->dictID = ddict->dictID;
+    dctx->prefixStart = ddict->dictContent;
+    dctx->virtualStart = ddict->dictContent;
+    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+    dctx->previousDstEnd = dctx->dictEnd;
+    if (ddict->entropyPresent) {
+        dctx->litEntropy = 1;
+        dctx->fseEntropy = 1;
+        dctx->LLTptr = ddict->entropy.LLTable;
+        dctx->MLTptr = ddict->entropy.MLTable;
+        dctx->OFTptr = ddict->entropy.OFTable;
+        dctx->HUFptr = ddict->entropy.hufTable;
+        dctx->entropy.rep[0] = ddict->entropy.rep[0];
+        dctx->entropy.rep[1] = ddict->entropy.rep[1];
+        dctx->entropy.rep[2] = ddict->entropy.rep[2];
+    } else {
+        dctx->litEntropy = 0;
+        dctx->fseEntropy = 0;
+    }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+                           ZSTD_dictContentType_e dictContentType)
+{
+    ddict->dictID = 0;
+    ddict->entropyPresent = 0;
+    if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+    if (ddict->dictSize < 8) {
+        if (dictContentType == ZSTD_dct_fullDict)
+            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+        return 0;   /* pure content mode */
+    }
+    {   U32 const magic = MEM_readLE32(ddict->dictContent);
+        if (magic != ZSTD_MAGIC_DICTIONARY) {
+            if (dictContentType == ZSTD_dct_fullDict)
+                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+            return 0;   /* pure content mode */
+        }
+    }
+    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+    /* load entropy tables */
+    CHECK_E( ZSTD_loadDEntropy(&ddict->entropy,
+                               ddict->dictContent, ddict->dictSize),
+             dictionary_corrupted );
+    ddict->entropyPresent = 1;
+    return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType)
+{
+    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+        ddict->dictBuffer = NULL;
+        ddict->dictContent = dict;
+        if (!dict) dictSize = 0;
+    } else {
+        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+        ddict->dictBuffer = internalBuffer;
+        ddict->dictContent = internalBuffer;
+        if (!internalBuffer) return ERROR(memory_allocation);
+        memcpy(internalBuffer, dict, dictSize);
+    }
+    ddict->dictSize = dictSize;
+    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
+
+    /* parse dictionary content */
+    CHECK_F( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
+
+    return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType,
+                                      ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+        if (ddict == NULL) return NULL;
+        ddict->cMem = customMem;
+        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
+                                            dict, dictSize,
+                                            dictLoadMethod, dictContentType);
+            if (ZSTD_isError(initResult)) {
+                ZSTD_freeDDict(ddict);
+                return NULL;
+        }   }
+        return ddict;
+    }
+}
+
+/*! ZSTD_createDDict() :
+*   Create a digested dictionary, to start decompression without startup delay.
+*   `dict` content is copied inside DDict.
+*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, to start decompression without startup delay.
+ *  Dictionary content is simply referenced, it will be accessed during decompression.
+ *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+                                void* sBuffer, size_t sBufferSize,
+                                const void* dict, size_t dictSize,
+                                ZSTD_dictLoadMethod_e dictLoadMethod,
+                                ZSTD_dictContentType_e dictContentType)
+{
+    size_t const neededSpace = sizeof(ZSTD_DDict)
+                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+    assert(sBuffer != NULL);
+    assert(dict != NULL);
+    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
+    if (sBufferSize < neededSpace) return NULL;
+    if (dictLoadMethod == ZSTD_dlm_byCopy) {
+        memcpy(ddict+1, dict, dictSize);  /* local copy */
+        dict = ddict+1;
+    }
+    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+                                              dict, dictSize,
+                                              ZSTD_dlm_byRef, dictContentType) ))
+        return NULL;
+    return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = ddict->cMem;
+        ZSTD_free(ddict->dictBuffer, cMem);
+        ZSTD_free(ddict, cMem);
+        return 0;
+    }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ *  Estimate amount of memory that will be needed to create a dictionary for decompression.
+ *  Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;
+    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
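
This new file gathers everything that needs to see inside a `ZSTD_DDict`; callers continue to use the object through the stable public API. A minimal usage sketch (error handling abbreviated, output buffer sizing left to the caller):

```c
#include <stdio.h>
#include <zstd.h>

/* Decompress one frame that was produced with a dictionary, reusing a
 * pre-digested ZSTD_DDict. Returns the decompressed size, or 0 on failure. */
size_t decompress_with_ddict(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const void* dictBuf, size_t dictSize)
{
    size_t result = 0;
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);  /* copies dictBuf */
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    if (ddict != NULL && dctx != NULL) {
        result = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
        if (ZSTD_isError(result)) {
            fprintf(stderr, "decompression failed: %s\n", ZSTD_getErrorName(result));
            result = 0;
        }
    }
    ZSTD_freeDCtx(dctx);      /* both free functions accept NULL */
    ZSTD_freeDDict(ddict);
    return result;
}
```

Creating the DDict once and reusing it across many frames avoids re-parsing the dictionary's entropy tables on every call, which is exactly the startup cost this object exists to amortize.
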