zstd-ruby 1.4.0.0 → 1.4.1.0
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/ext/zstdruby/libzstd/Makefile +5 -0
- data/ext/zstdruby/libzstd/common/compiler.h +7 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +58 -6
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +175 -117
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +74 -30
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +56 -36
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +35 -14
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +10 -5
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +45 -32
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +18 -7
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +1 -0
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +12 -9
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +20 -9
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +154 -43
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +38 -3
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +46 -39
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +9 -9
- data/ext/zstdruby/libzstd/dictBuilder/zdict.h +5 -0
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +4 -0
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +95 -101
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +11 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +11 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +11 -8
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +88 -84
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +2 -4
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +2 -4
- data/ext/zstdruby/libzstd/zstd.h +53 -21
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +3 -4
@@ -218,6 +218,11 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
     }
 }
 
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
 {
     if (MEM_isLittleEndian())
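The new MEM_readLE24() composes a 16-bit little-endian read with the third byte shifted into bits 16..23. For reference, a standalone sketch of the same computation using plain stdint types instead of libzstd's BYTE/U16/U32 typedefs (read_le24 and the test values are illustrative, not part of the library):

    #include <assert.h>
    #include <stdint.h>

    /* 24-bit little-endian read: byte 0 is least significant, byte 2 lands in bits 16..23.
     * Built from individual bytes so it works regardless of host endianness. */
    static uint32_t read_le24(const void* p)
    {
        const uint8_t* const b = (const uint8_t*)p;
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
    }

    int main(void)
    {
        const uint8_t buf[3] = { 0x01, 0x02, 0x03 };
        assert(read_le24(buf) == 0x030201);   /* 197121 */
        return 0;
    }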
@@ -1998,91 +2003,92 @@ size_t HUFv05_decompress4X2_usingDTable(
     const void* cSrc, size_t cSrcSize,
     const U16* DTable)
 {
-    const BYTE* const istart = (const BYTE*) cSrc;
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* const oend = ostart + dstSize;
-    const void* const dtPtr = DTable;
-    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
-    const U32 dtLog = DTable[0];
-    size_t errorCode;
-
-    /* Init */
-    BITv05_DStream_t bitD1;
-    BITv05_DStream_t bitD2;
-    BITv05_DStream_t bitD3;
-    BITv05_DStream_t bitD4;
-    const size_t length1 = MEM_readLE16(istart);
-    const size_t length2 = MEM_readLE16(istart+2);
-    const size_t length3 = MEM_readLE16(istart+4);
-    size_t length4;
-    const BYTE* const istart1 = istart + 6;  /* jumpTable */
-    const BYTE* const istart2 = istart1 + length1;
-    const BYTE* const istart3 = istart2 + length2;
-    const BYTE* const istart4 = istart3 + length3;
-    const size_t segmentSize = (dstSize+3) / 4;
-    BYTE* const opStart2 = ostart + segmentSize;
-    BYTE* const opStart3 = opStart2 + segmentSize;
-    BYTE* const opStart4 = opStart3 + segmentSize;
-    BYTE* op1 = ostart;
-    BYTE* op2 = opStart2;
-    BYTE* op3 = opStart3;
-    BYTE* op4 = opStart4;
-    U32 endSignal;
-
     /* Check */
     if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
 
-    length4 = cSrcSize - (length1 + length2 + length3 + 6);
-    if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-    errorCode = BITv05_initDStream(&bitD1, istart1, length1);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD2, istart2, length2);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD3, istart3, length3);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD4, istart4, length4);
-    if (HUFv05_isError(errorCode)) return errorCode;
+        /* Init */
+        BITv05_DStream_t bitD1;
+        BITv05_DStream_t bitD2;
+        BITv05_DStream_t bitD3;
+        BITv05_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
 
-    /* 16-32 symbols per loop (4-8 symbols per stream) */
-    endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-    for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
-        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+        if (HUFv05_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
         endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-    }
+        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+        }
 
-    /* check corruption */
-    if (op1 > opStart2) return ERROR(corruption_detected);
-    if (op2 > opStart3) return ERROR(corruption_detected);
-    if (op3 > opStart4) return ERROR(corruption_detected);
-    /* note : op4 supposed already verified within main loop */
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 supposed already verified within main loop */
 
-    /* finish bitStreams one by one */
-    HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-    HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-    HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-    HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+        /* finish bitStreams one by one */
+        HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
 
-    /* check */
-    endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
-    if (!endSignal) return ERROR(corruption_detected);
+        /* check */
+        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
 
-    /* decoded size */
-    return dstSize;
+        /* decoded size */
+        return dstSize;
+    }
 }
 
 
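The restructuring above delays every read of the 6-byte jump table until after the `cSrcSize < 10` check (jump table plus at least one byte per stream), so a truncated input can no longer be dereferenced. A minimal sketch of that header layout and the same ordering of checks, with hypothetical names rather than the HUFv05 internals:

    #include <stddef.h>
    #include <stdint.h>

    /* 4-stream layout: [len1:LE16][len2:LE16][len3:LE16][stream1][stream2][stream3][stream4]
     * The fourth length is implied by the total compressed size. */
    typedef struct { const uint8_t* start[4]; size_t len[4]; } four_streams;

    static size_t rd_le16(const uint8_t* p) { return (size_t)p[0] | ((size_t)p[1] << 8); }

    /* Returns 0 on success, -1 if the source is too small or the lengths are inconsistent.
     * The size check comes first, before any jump-table read. */
    static int split_four_streams(const uint8_t* src, size_t srcSize, four_streams* out)
    {
        size_t len1, len2, len3;
        if (srcSize < 10) return -1;                      /* 6-byte jump table + 1 byte per stream */
        len1 = rd_le16(src);
        len2 = rd_le16(src + 2);
        len3 = rd_le16(src + 4);
        if (len1 + len2 + len3 + 6 > srcSize) return -1;  /* streams would overflow the source */
        out->len[0] = len1;  out->start[0] = src + 6;
        out->len[1] = len2;  out->start[1] = out->start[0] + len1;
        out->len[2] = len3;  out->start[2] = out->start[1] + len2;
        out->len[3] = srcSize - (len1 + len2 + len3 + 6);
        out->start[3] = out->start[2] + len3;
        return 0;
    }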
@@ -3150,14 +3156,13 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
     litLength = FSEv05_peakSymbol(&(seqState->stateLL));
     prevOffset = litLength ? seq->offset : seqState->prevOffset;
     if (litLength == MaxLL) {
-        U32 add = *dumps++;
+        const U32 add = *dumps++;
         if (add < 255) litLength += add;
-        else {
-            litLength =
+        else if (dumps + 3 <= de) {
+            litLength = MEM_readLE24(dumps);
             if (litLength&1) litLength>>=1, dumps += 3;
             else litLength = (U16)(litLength)>>1, dumps += 2;
         }
-        if (dumps > de) { litLength = MaxLL+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
 
@@ -3184,14 +3189,13 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
     /* MatchLength */
     matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML) {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
-        else {
-            matchLength =
+        else if (dumps + 3 <= de) {
+            matchLength = MEM_readLE24(dumps);
             if (matchLength&1) matchLength>>=1, dumps += 3;
             else matchLength = (U16)(matchLength)>>1, dumps += 2;
         }
-        if (dumps > de) { matchLength = MaxML+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
     matchLength += MINMATCH;
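In the two hunks above, the 3-byte length extension is now read only when `dumps + 3 <= de`, i.e. when the whole 24-bit value lies inside the dumps area, and the removed `MaxLL+255`/`MaxML+255` late corrections are no longer needed. A hedged sketch of that guard pattern with illustrative names (not the ZSTDv05 decoder itself):

    #include <stddef.h>
    #include <stdint.h>

    /* Decode an extended length: one escape byte, optionally followed by a 24-bit
     * little-endian value whose low bit selects a 2- or 3-byte consumption.
     * cur/end delimit the "dumps" area; no read ever crosses end. */
    static size_t decode_extended_length(size_t base, const uint8_t** curPtr, const uint8_t* end)
    {
        const uint8_t* cur = *curPtr;
        size_t length = base;
        const uint32_t add = (cur < end) ? *cur++ : 0;
        if (add < 255) {
            length += add;
        } else if (cur + 3 <= end) {                       /* full 24-bit extension is available */
            const uint32_t v = (uint32_t)cur[0] | ((uint32_t)cur[1] << 8) | ((uint32_t)cur[2] << 16);
            if (v & 1) { length = v >> 1; cur += 3; }
            else       { length = (uint16_t)v >> 1; cur += 2; }
        }
        if (cur >= end) cur = end - 1;                     /* clamp, mirroring the late correction kept above */
        *curPtr = cur;
        return length;
    }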
@@ -3242,14 +3242,12 @@ static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
     }
 
     /* FSE table descriptors */
+    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
     {   U32 const LLtype  = *ip >> 6;
         U32 const Offtype = (*ip >> 4) & 3;
         U32 const MLtype  = (*ip >> 2) & 3;
         ip++;
 
-        /* check */
-        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
         /* Build DTables */
         {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
             if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
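The byte guarded by the new `ip + 4 > iend` check packs three 2-bit compression-mode fields: literal lengths in bits 6-7, offsets in bits 4-5, match lengths in bits 2-3, exactly the `*ip >> 6`, `(*ip >> 4) & 3`, `(*ip >> 2) & 3` extraction above. A small sketch of decoding such a descriptor byte (names are illustrative):

    #include <stdint.h>

    typedef struct { uint8_t llType, offType, mlType; } seq_modes;

    /* Split a sequence-section descriptor byte into its three 2-bit mode fields. */
    static seq_modes parse_seq_descriptor(uint8_t b)
    {
        seq_modes m;
        m.llType  = (uint8_t)(b >> 6);
        m.offType = (uint8_t)((b >> 4) & 3);
        m.mlType  = (uint8_t)((b >> 2) & 3);
        return m;
    }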
@@ -3672,7 +3670,7 @@ void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
     blockProperties_t blockProperties = { bt_compressed, 0 };
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src,
+    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);
         if (ZSTDv06_isError(frameHeaderSize)) {
             ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
             return;
@@ -3470,14 +3470,12 @@ static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
     }
 
     /* FSE table descriptors */
+    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
     {   U32 const LLtype  = *ip >> 6;
         U32 const OFtype  = (*ip >> 4) & 3;
         U32 const MLtype  = (*ip >> 2) & 3;
         ip++;
 
-        /* check */
-        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
         /* Build DTables */
         {   size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
             if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
@@ -3918,7 +3916,7 @@ void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
     }
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src,
+    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);
         if (ZSTDv07_isError(frameHeaderSize)) {
             ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
             return;
data/ext/zstdruby/libzstd/zstd.h
CHANGED
@@ -71,7 +71,7 @@ extern "C" {
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    4
-#define ZSTD_VERSION_RELEASE  0
+#define ZSTD_VERSION_RELEASE  1
 
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
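With ZSTD_VERSION_RELEASE bumped to 1, ZSTD_VERSION_NUMBER evaluates to 1*100*100 + 4*100 + 1 = 10401. A small check that the header seen at compile time matches the library linked at run time (stable API only):

    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        /* 10401 for v1.4.1 : MAJOR*10000 + MINOR*100 + RELEASE */
        printf("compiled against zstd %s (%u)\n", ZSTD_VERSION_STRING, (unsigned)ZSTD_VERSION_NUMBER);
        printf("running with zstd %s (%u)\n", ZSTD_versionString(), ZSTD_versionNumber());
        return (ZSTD_versionNumber() == ZSTD_VERSION_NUMBER) ? 0 : 1;
    }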
@@ -82,16 +82,16 @@ ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library v
 #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
 ZSTDLIB_API const char* ZSTD_versionString(void);   /* requires v1.3.0+ */
 
-/***************************************
-*  Default constant
-***************************************/
+/* *************************************
+ *  Default constant
+ ***************************************/
 #ifndef ZSTD_CLEVEL_DEFAULT
 #  define ZSTD_CLEVEL_DEFAULT 3
 #endif
 
-/***************************************
-*  Constants
-***************************************/
+/* *************************************
+ *  Constants
+ ***************************************/
 
 /* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
 #define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
@@ -183,9 +183,14 @@ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compres
 ***************************************/
 /*= Compression context
  *  When compressing many times,
- *  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ *  it is recommended to allocate a context just once,
+ *  and re-use it for each successive compression operation.
  *  This will make workload friendlier for system's memory.
- *
+ *  Note : re-using context is just a speed / resource optimization.
+ *         It doesn't change the compression ratio, which remains identical.
+ *  Note 2 : In multi-threaded environments,
+ *         use one different context per thread for parallel execution.
+ */
 typedef struct ZSTD_CCtx_s ZSTD_CCtx;
 ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
 ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
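The reworded note is that reusing a single ZSTD_CCtx across many compressions is a speed and allocation optimization only; the ratio is unchanged, and parallel threads should each get their own context. A minimal sketch of the reuse pattern with the stable one-shot API (compress_many and its arguments are illustrative):

    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    /* Compress several independent buffers with one reusable context. */
    static int compress_many(const char* const* inputs, size_t count, int level)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();            /* allocate once */
        if (cctx == NULL) return -1;
        for (size_t i = 0; i < count; i++) {
            size_t const srcSize = strlen(inputs[i]);
            size_t const dstCap  = ZSTD_compressBound(srcSize);
            void* const dst = malloc(dstCap);
            if (dst == NULL) { ZSTD_freeCCtx(cctx); return -1; }
            {   size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCap, inputs[i], srcSize, level);
                if (ZSTD_isError(cSize)) { free(dst); ZSTD_freeCCtx(cctx); return -1; }
                /* ... use the cSize bytes at dst ... */
            }
            free(dst);
        }
        ZSTD_freeCCtx(cctx);                                  /* release once, at the end */
        return 0;
    }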
@@ -380,6 +385,7 @@ typedef enum {
  *   ZSTD_c_forceMaxWindow
  *   ZSTD_c_forceAttachDict
  *   ZSTD_c_literalCompressionMode
+ *   ZSTD_c_targetCBlockSize
  * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
  * note : never ever use experimentalParam? names directly;
  *        also, the enums values themselves are unstable and can still change.
@@ -389,6 +395,7 @@ typedef enum {
     ZSTD_c_experimentalParam3=1000,
     ZSTD_c_experimentalParam4=1001,
     ZSTD_c_experimentalParam5=1002,
+    ZSTD_c_experimentalParam6=1003,
 } ZSTD_cParameter;
 
 typedef struct {
@@ -657,17 +664,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective endOp);
 
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossing such interface to an absolute minimum.
+ * It's not rare that performance ends being spent more into the interface, rather than compression itself.
+ * In which cases, prefer using large buffers, as large as practical,
+ * for both input and output, to reduce the nb of roundtrips.
+ */
 ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
 
-/*******************************************************************************
- * This following is a legacy streaming API.
- * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+
+/* *****************************************************************************
+ *  This following is a legacy streaming API.
+ *  It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ *  It is redundant, but remains fully supported.
  * Advanced parameters and dictionary compression can only be used through the
  * new API.
 ******************************************************************************/
 
-
+/*!
  * Equivalent to:
  *
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
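The new comment block presents ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() as soft recommendations. A sketch of a file-to-file loop sized with them, following the usual ZSTD_compressStream2() flush pattern (error handling trimmed; a real program should run ZSTD_isError on every return value):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Stream src into dst using the recommended buffer sizes. */
    static int compress_file(FILE* src, FILE* dst, int level)
    {
        size_t const inCap  = ZSTD_CStreamInSize();    /* recommended input buffer size */
        size_t const outCap = ZSTD_CStreamOutSize();   /* can flush at least one full block */
        void* const inBuf  = malloc(inCap);
        void* const outBuf = malloc(outCap);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const readSz = fread(inBuf, 1, inCap, src);
            int const lastChunk = (readSz < inCap);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSz, 0 };
            size_t remaining;
            do {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                fwrite(outBuf, 1, output.pos, dst);
            } while (lastChunk ? (remaining != 0) : (input.pos < input.size));
            if (lastChunk) break;
        }
        ZSTD_freeCCtx(cctx);
        free(inBuf); free(outBuf);
        return 0;
    }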
@@ -675,16 +698,16 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
  *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
  */
 ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-
+/*!
  * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
  * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
  * the next read size (if non-zero and not an error). ZSTD_compressStream2()
- * returns the
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
  */
 ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
 ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
 ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
 
 
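The reworked comments document the legacy entry points as thin wrappers over the newer calls. A sketch of those equivalences expressed with the new API (ZSTD_CStream is the same type as ZSTD_CCtx, so the context functions apply directly; the helper names are illustrative):

    #include <zstd.h>

    /* Same effect as ZSTD_initCStream(zcs, level), per the equivalence documented above. */
    static size_t init_cstream_new_api(ZSTD_CStream* zcs, int level)
    {
        size_t const err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
    }

    /* Same effect as ZSTD_flushStream() (ZSTD_e_flush) or ZSTD_endStream() (ZSTD_e_end):
     * drive ZSTD_compressStream2() with an empty input and the matching directive. */
    static size_t flush_or_end(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_EndDirective directive)
    {
        ZSTD_inBuffer emptyInput = { NULL, 0, 0 };
        return ZSTD_compressStream2(zcs, output, &emptyInput, directive);
    }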
@@ -969,7 +992,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #endif  /* ZSTD_H_235446 */
 
 
-/****************************************************************************************
+/* **************************************************************************************
  * ADVANCED AND EXPERIMENTAL FUNCTIONS
  ****************************************************************************************
  * The definitions in the following section are considered experimental.
@@ -1037,6 +1060,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #define ZSTD_LDM_HASHRATELOG_MIN     0
 #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
 
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN   64
+#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
+
 /* internal */
 #define ZSTD_HASHLOG3_MAX           17
 
@@ -1162,7 +1189,7 @@ typedef enum {
  *  however it does mean that all frame data must be present and valid. */
 ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 
-
+/*! ZSTD_decompressBound() :
  *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
  *  `srcSize` must be the _exact_ size of this series
  *       (i.e. there should be a frame boundary at `src + srcSize`)
@@ -1409,6 +1436,11 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  */
 #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
 
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
 /*! ZSTD_CCtx_getParameter() :
  *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
  *  and store it into int* value.
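ZSTD_c_targetCBlockSize is experimental, so it is only declared when ZSTD_STATIC_LINKING_ONLY is defined before including zstd.h, and useful values sit in [ZSTD_TARGETCBLOCKSIZE_MIN, ZSTD_TARGETCBLOCKSIZE_MAX] (0 disables the target). A hedged sketch of setting it; as the comment says, the resulting block size is best-effort only:

    #define ZSTD_STATIC_LINKING_ONLY   /* experimental parameters need the static-linking-only declarations */
    #include <zstd.h>

    /* Ask the compressor to aim for roughly `target`-byte compressed blocks.
     * Valid targets lie in [ZSTD_TARGETCBLOCKSIZE_MIN, ZSTD_TARGETCBLOCKSIZE_MAX];
     * 0 disables the target. The size is a hint, not a guarantee. */
    static size_t set_target_cblock_size(ZSTD_CCtx* cctx, int target)
    {
        return ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, target);
    }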
@@ -1843,7 +1875,7 @@ typedef struct {
     unsigned checksumFlag;
 } ZSTD_frameHeader;
 
-
+/*! ZSTD_getFrameHeader() :
  *  decode Frame Header, or requires larger `srcSize`.
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
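ZSTD_getFrameHeader() returns 0 once zfhPtr is filled, a positive byte count when srcSize is too small, and an error code otherwise. A minimal sketch of handling those three cases (experimental declarations, so ZSTD_STATIC_LINKING_ONLY is assumed):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    /* Inspect a (possibly partial) frame header. Returns 0 on success, -1 on error,
     * and 1 when `srcSize` bytes are not yet enough to decode the header. */
    static int inspect_frame(const void* src, size_t srcSize)
    {
        ZSTD_frameHeader zfh;
        size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (ZSTD_isError(ret)) return -1;          /* not a zstd frame / corrupted */
        if (ret > 0) {                             /* need at least `ret` bytes in total */
            printf("need %zu bytes to decode the frame header\n", ret);
            return 1;
        }
        printf("windowSize=%llu, checksum=%u\n",
               (unsigned long long)zfh.windowSize, zfh.checksumFlag);
        return 0;
    }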
data/lib/zstd-ruby/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: zstd-ruby
 version: !ruby/object:Gem::Version
-  version: 1.4.0.0
+  version: 1.4.1.0
 platform: ruby
 authors:
 - SpringMT
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2019-
+date: 2019-07-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -194,8 +194,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-
-rubygems_version: 2.7.6
+rubygems_version: 3.0.3
 signing_key:
 specification_version: 4
 summary: Ruby binding for zstd(Zstandard - Fast real-time compression algorithm)