zstd-ruby 1.5.1.0 → 1.5.2.1
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +56 -3
- data/Rakefile +8 -2
- data/ext/zstdruby/{zstdruby.h → common.h} +2 -0
- data/ext/zstdruby/extconf.rb +1 -1
- data/ext/zstdruby/libzstd/common/pool.c +11 -6
- data/ext/zstdruby/libzstd/common/pool.h +2 -2
- data/ext/zstdruby/libzstd/common/portability_macros.h +6 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +3 -4
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +114 -96
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +72 -39
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +10 -10
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +38 -24
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +10 -10
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +11 -11
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +66 -62
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -3
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +66 -43
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +17 -9
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +4 -1
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +2 -2
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +17 -3
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +2 -2
- data/ext/zstdruby/libzstd/zstd.h +1 -1
- data/ext/zstdruby/main.c +14 -0
- data/ext/zstdruby/streaming_compress.c +185 -0
- data/ext/zstdruby/streaming_compress.h +5 -0
- data/ext/zstdruby/streaming_decompress.c +125 -0
- data/ext/zstdruby/zstdruby.c +4 -6
- data/lib/zstd-ruby/version.rb +1 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +11 -40
- data/.github/dependabot.yml +0 -8
- data/.github/workflows/ruby.yml +0 -35
- data/ext/zstdruby/libzstd/.gitignore +0 -3
- data/ext/zstdruby/libzstd/BUCK +0 -232
- data/ext/zstdruby/libzstd/Makefile +0 -357
- data/ext/zstdruby/libzstd/README.md +0 -217
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -167
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
- data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
- data/ext/zstdruby/libzstd/dll/example/README.md +0 -63
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
- data/ext/zstdruby/libzstd/libzstd.mk +0 -185
- data/ext/zstdruby/libzstd/libzstd.pc.in +0 -16
- data/ext/zstdruby/libzstd/modulemap/module.modulemap +0 -4
data/ext/zstdruby/libzstd/compress/zstd_opt.c
CHANGED
@@ -204,7 +204,8 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
                 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1
             };
-            ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
+            ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
+            optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
         }
 
         {   unsigned ml;
@@ -219,7 +220,8 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
                 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, 1, 1
             };
-            ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
+            ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
+            optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
         }
 
 
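These two hunks split the statistics seeding into two steps: copy the baseline frequency table, then recompute the running total with sum_u32. A minimal sketch of a helper with that name and call shape, shown only as an illustration (the signature is an assumption, not a quote of the bundled source):

    #include <stddef.h>

    /* Hypothetical stand-in for sum_u32(table, nbElts) as called above:
     * a plain accumulation over the first nbElts entries. */
    static unsigned sum_u32(const unsigned table[], size_t nbElts)
    {
        unsigned total = 0;
        size_t n;
        for (n = 0; n < nbElts; n++)
            total += table[n];   /* seeds litLengthSum / offCodeSum above */
        return total;
    }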
@@ -267,7 +269,16 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
  * cost of literalLength symbol */
 static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+    assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+    if (optPtr->priceType == zop_predef)
+        return WEIGHT(litLength, optLevel);
+    /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+     * because it isn't representable in the zstd format. So instead just
+     * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
+     * would be all literals.
+     */
+    if (litLength == ZSTD_BLOCKSIZE_MAX)
+        return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
 
     /* dynamic statistics */
     { U32 const llCode = ZSTD_LLcode(litLength);
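The new special case prices litLength == ZSTD_BLOCKSIZE_MAX as one bit more than the largest representable length. zstd prices are fixed-point bit counts, so "one bit" is one BITCOST_MULTIPLIER step; a sketch of that convention (the accuracy constant here is an assumption for illustration, not taken from this diff):

    /* Fixed-point bit prices: price == bits << accuracy. */
    #define MY_BITCOST_ACCURACY   8                        /* assumed fractional bits */
    #define MY_BITCOST_MULTIPLIER (1 << MY_BITCOST_ACCURACY)

    /* Charging one extra whole bit on top of an existing price: */
    static unsigned addOneBit(unsigned price) { return price + MY_BITCOST_MULTIPLIER; }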
@@ -280,15 +291,17 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
 /* ZSTD_getMatchPrice() :
  * Provides the cost of the match part (offset + matchLength) of a sequence
  * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
- * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+ * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
+ * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
+ */
 FORCE_INLINE_TEMPLATE U32
-ZSTD_getMatchPrice(U32 const offset,
+ZSTD_getMatchPrice(U32 const offcode,
                    U32 const matchLength,
                    const optState_t* const optPtr,
                    int const optLevel)
 {
     U32 price;
-    U32 const offCode = ZSTD_highbit32(offset+1);
+    U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
     U32 const mlBase = matchLength - MINMATCH;
     assert(matchLength >= MINMATCH);
 
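The @offcode comment above pins down the scale these hunks migrate to: stored values 0-2 denote repcodes 1-3, and a real offset o is stored as o+2. A toy model of that mapping (macro bodies are assumptions for demonstration; only the names STORE_REPCODE, STORE_OFFSET and STORED_TO_OFFBASE come from the diff):

    #include <assert.h>

    #define MY_STORE_REPCODE(r)     ((r) - 1)   /* repcode 1-3   -> stored 0-2    */
    #define MY_STORE_OFFSET(o)      ((o) + 2)   /* real offset o -> stored o+2    */
    #define MY_STORED_TO_OFFBASE(s) ((s) + 1)   /* stored value  -> offBase scale */

    int main(void)
    {
        assert(MY_STORE_REPCODE(1) == 0);       /* repcodes occupy stored 0..2 */
        assert(MY_STORE_OFFSET(1)  == 3);       /* real offsets start at 3     */
        assert(MY_STORED_TO_OFFBASE(MY_STORE_OFFSET(1)) == 4);
        return 0;
    }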
@@ -331,8 +344,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
         optPtr->litLengthSum++;
     }
 
-    /* match offset code (can be null) */
-    { U32 const offCode = ZSTD_highbit32(offsetCode+1);
+    /* offset code : expected to follow storeSeq() numeric representation */
+    { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
         assert(offCode <= MaxOff);
         optPtr->offCodeFreq[offCode]++;
         optPtr->offCodeSum++;
@@ -631,7 +644,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
             DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                         repCode, ll0, repOffset, repLen);
             bestLength = repLen;
-            matches[mnum].off = repCode - ll0;
+            matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1);  /* expect value between 1 and 3 */
             matches[mnum].len = (U32)repLen;
             mnum++;
             if ( (repLen > sufficient_len)
@@ -660,7 +673,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 bestLength = mlen;
                 assert(curr > matchIndex3);
                 assert(mnum==0);  /* no prior solution */
-                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
+                matches[0].off = STORE_OFFSET(curr - matchIndex3);
                 matches[0].len = (U32)mlen;
                 mnum = 1;
                 if ( (mlen > sufficient_len) |
@@ -694,12 +707,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
 
         if (matchLength > bestLength) {
             DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
-                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+                    (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
             assert(matchEndIdx > matchIndex);
             if (matchLength > matchEndIdx - matchIndex)
                 matchEndIdx = matchIndex + (U32)matchLength;
             bestLength = matchLength;
-            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+            matches[mnum].off = STORE_OFFSET(curr - matchIndex);
             matches[mnum].len = (U32)matchLength;
             mnum++;
             if ( (matchLength > ZSTD_OPT_NUM)
@@ -742,11 +755,11 @@ U32 ZSTD_insertBtAndGetAllMatches (
             if (matchLength > bestLength) {
                 matchIndex = dictMatchIndex + dmsIndexDelta;
                 DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
-                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+                        (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
                 if (matchLength > matchEndIdx - matchIndex)
                     matchEndIdx = matchIndex + (U32)matchLength;
                 bestLength = matchLength;
-                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+                matches[mnum].off = STORE_OFFSET(curr - matchIndex);
                 matches[mnum].len = (U32)matchLength;
                 mnum++;
                 if ( (matchLength > ZSTD_OPT_NUM)
@@ -835,7 +848,8 @@ GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
     ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
 }
 
-static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
+static ZSTD_getAllMatchesFn
+ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
 {
     ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
         ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
@@ -854,16 +868,18 @@ static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const*
 
 /* Struct containing info needed to make decision about ldm inclusion */
 typedef struct {
-    rawSeqStore_t seqStore;
-    U32 startPosInBlock;
-    U32 endPosInBlock;
-    U32 offset;
+    rawSeqStore_t seqStore;   /* External match candidates store for this block */
+    U32 startPosInBlock;      /* Start position of the current match candidate */
+    U32 endPosInBlock;        /* End position of the current match candidate */
+    U32 offset;               /* Offset of the match candidate */
 } ZSTD_optLdm_t;
 
 /* ZSTD_optLdm_skipRawSeqStoreBytes():
- * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
+ * Moves forward in @rawSeqStore by @nbBytes,
+ * which will update the fields 'pos' and 'posInSequence'.
  */
-static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+{
     U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
     while (currPos && rawSeqStore->pos < rawSeqStore->size) {
         rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
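The comment above describes a two-field cursor: 'pos' selects the current raw sequence and 'posInSequence' is the byte position inside it. A self-contained toy of that skipping logic (types and names invented for illustration; the rawSeq/rawSeqStore_t fields are assumed from the loop shown):

    #include <stddef.h>

    typedef struct { unsigned litLength, matchLength; } toySeq;
    typedef struct { const toySeq* seq; size_t pos, size, posInSequence; } toySeqStore;

    /* Advance nbBytes through sequences of (litLength + matchLength) bytes each. */
    static void toySkipBytes(toySeqStore* s, size_t nbBytes)
    {
        size_t currPos = s->posInSequence + nbBytes;
        while (s->pos < s->size) {
            size_t const seqLen = s->seq[s->pos].litLength + s->seq[s->pos].matchLength;
            if (currPos < seqLen) { s->posInSequence = currPos; return; }
            currPos -= seqLen;   /* consumed this sequence entirely; move to the next */
            s->pos++;
        }
        s->posInSequence = 0;    /* ran off the end of the store */
    }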
@@ -884,8 +900,10 @@ static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t
  * Calculates the beginning and end of the next match in the current block.
  * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
  */
-static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
-                                                   U32 blockBytesRemaining) {
+static void
+ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+                                       U32 blockBytesRemaining)
+{
     rawSeq currSeq;
     U32 currBlockEndPos;
     U32 literalsBytesRemaining;
@@ -897,8 +915,8 @@ static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 cu
         optLdm->endPosInBlock = UINT_MAX;
         return;
     }
-    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
-       based on ldmSeqStore->posInSequence */
+    /* Calculate appropriate bytes left in matchLength and litLength
+     * after adjusting based on ldmSeqStore->posInSequence */
     currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
     assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
     currBlockEndPos = currPosInBlock + blockBytesRemaining;
@@ -934,15 +952,16 @@ static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 cu
 }
 
 /* ZSTD_optLdm_maybeAddMatch():
- * Adds a match if it's long enough, based on it's 'matchStartPosInBlock'
- * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'.
+ * Adds a match if it's long enough,
+ * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock',
+ * into 'matches'. Maintains the correct ordering of 'matches'.
  */
 static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
-                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
-    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
+                                      const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+{
+    U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
     /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
-    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
-    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
+    U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
 
     /* Ensure that current block position is not outside of the match */
     if (currPosInBlock < optLdm->startPosInBlock
@@ -952,6 +971,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
     }
 
     if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+        U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
         DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
                  candidateOffCode, candidateMatchLength, currPosInBlock);
         matches[*nbMatches].len = candidateMatchLength;
@@ -963,8 +983,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
 /* ZSTD_optLdm_processMatchCandidate():
  * Wrapper function to update ldm seq store and call ldm functions as necessary.
  */
-static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
-                                              U32 currPosInBlock, U32 remainingBytes) {
+static void
+ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+                                  ZSTD_match_t* matches, U32* nbMatches,
+                                  U32 currPosInBlock, U32 remainingBytes)
+{
     if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
         return;
     }
@@ -975,7 +998,7 @@ static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_
          * at the end of a match from the ldm seq store, and will often be some bytes
          * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
          */
-        U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+        U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
         ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
     }
     ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
@@ -1075,14 +1098,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
 
         /* large match -> immediate encoding */
         {   U32 const maxML = matches[nbMatches-1].len;
-            U32 const maxOffset = matches[nbMatches-1].off;
+            U32 const maxOffcode = matches[nbMatches-1].off;
             DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
-                        nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
+                        nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
 
             if (maxML > sufficient_len) {
                 lastSequence.litlen = litlen;
                 lastSequence.mlen = maxML;
-                lastSequence.off = maxOffset;
+                lastSequence.off = maxOffcode;
                 DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                             maxML, sufficient_len);
                 cur = 0;
@@ -1099,15 +1122,15 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                 opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
             }
             for (matchNb = 0; matchNb < nbMatches; matchNb++) {
-                U32 const offset = matches[matchNb].off;
+                U32 const offcode = matches[matchNb].off;
                 U32 const end = matches[matchNb].len;
                 for ( ; pos <= end ; pos++ ) {
-                    U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+                    U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
                     U32 const sequencePrice = literalsPrice + matchPrice;
                     DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                 pos, ZSTD_fCost(sequencePrice));
                     opt[pos].mlen = pos;
-                    opt[pos].off = offset;
+                    opt[pos].off = offcode;
                     opt[pos].litlen = litlen;
                     opt[pos].price = (int)sequencePrice;
             }   }
@@ -1152,7 +1175,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             assert(cur >= opt[cur].mlen);
             if (opt[cur].mlen != 0) {
                 U32 const prev = cur - opt[cur].mlen;
-                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
+                repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
                 ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
             } else {
                 ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
@@ -1242,7 +1265,7 @@ _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
          * update them while traversing the sequences.
          */
         if (lastSequence.mlen != 0) {
-            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+            repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
             ZSTD_memcpy(rep, &reps, sizeof(reps));
         } else {
             ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
@@ -1286,7 +1309,7 @@ _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
 
             assert(anchor + llen <= iend);
             ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
-            ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
+            ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
             anchor += advance;
             ip = anchor;
     }   }
data/ext/zstdruby/libzstd/compress/zstdmt_compress.c
CHANGED
@@ -102,9 +102,8 @@ typedef struct ZSTDMT_bufferPool_s {
     buffer_t bTable[1];   /* variable size */
 } ZSTDMT_bufferPool;
 
-static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
 {
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
     ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
         sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
     if (bufPool==NULL) return NULL;
@@ -160,9 +159,8 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
 }
 
 
-static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
 {
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
     if (srcBufPool==NULL) return NULL;
     if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
         return srcBufPool;
@@ -171,7 +169,7 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool,
     size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
     ZSTDMT_bufferPool* newBufPool;
     ZSTDMT_freeBufferPool(srcBufPool);
-    newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
     if (newBufPool==NULL) return newBufPool;
     ZSTDMT_setBufferSize(newBufPool, bSize);
     return newBufPool;
@@ -263,6 +261,16 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_customFree(buf.start, bufPool->cMem);
 }
 
+/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
+ * The 3 additional buffers are as follows:
+ *   1 buffer for input loading
+ *   1 buffer for "next input" when submitting current one
+ *   1 buffer stuck in queue */
+#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3
+
+/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
+ * So we only need one seq buffer per worker. */
+#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers
 
 /* =====   Seq Pool Wrapper   ====== */
 
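The comment above fixes the buffer budget per worker; a quick sanity check of that arithmetic (standalone helpers with an arbitrary worker count, parenthesized unlike the bare macros):

    #include <stdio.h>

    static unsigned bufPoolMax(unsigned nbWorkers) { return 2*nbWorkers + 3; }
    static unsigned seqPoolMax(unsigned nbWorkers) { return nbWorkers; }

    int main(void)
    {
        /* 4 workers: 8 dst buffers + 1 input + 1 "next input" + 1 queued = 11 */
        printf("buf pool: %u, seq pool: %u\n", bufPoolMax(4), seqPoolMax(4));
        return 0;
    }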
@@ -316,7 +324,7 @@ static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
 
 static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
-    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
     if (seqPool == NULL) return NULL;
     ZSTDMT_setNbSeq(seqPool, 0);
     return seqPool;
@@ -329,7 +337,7 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
 
 static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
 {
-    return ZSTDMT_expandBufferPool(pool, nbWorkers);
+    return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
 }
 
 
|
@@ -936,7 +944,7 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
|
|
936
944
|
mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
|
937
945
|
assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
|
938
946
|
mtctx->jobIDMask = nbJobs - 1;
|
939
|
-
mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
|
947
|
+
mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
|
940
948
|
mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
|
941
949
|
mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
|
942
950
|
initError = ZSTDMT_serialState_init(&mtctx->serial);
|
@@ -1039,7 +1047,7 @@ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
 {
     if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
     FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
-    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
     if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
     mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
     if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
data/ext/zstdruby/libzstd/compress/zstdmt_compress.h
CHANGED
@@ -65,8 +65,11 @@ size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
  * Private use only. Init streaming operation.
  * expects params to be valid.
  * must receive dict, or cdict, or none, but not both.
+ * mtctx can be freshly constructed or reused from a prior compression.
+ * If mtctx is reused, memory allocations from the prior compression may not be freed,
+ * even if they are not needed for the current compression.
  * @return : 0, or an error code */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx,
                     const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
data/ext/zstdruby/libzstd/decompress/huf_decompress.c
CHANGED
@@ -664,7 +664,7 @@ size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize,
 
 #if ZSTD_ENABLE_ASM_X86_64_BMI2
 
-HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args);
+HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
 
 static HUF_ASM_X86_64_BMI2_ATTRS
 size_t
|
@@ -1380,7 +1380,7 @@ size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize,
|
|
1380
1380
|
|
1381
1381
|
#if ZSTD_ENABLE_ASM_X86_64_BMI2
|
1382
1382
|
|
1383
|
-
HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args);
|
1383
|
+
HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
|
1384
1384
|
|
1385
1385
|
static HUF_ASM_X86_64_BMI2_ATTRS size_t
|
1386
1386
|
HUF_decompress4X2_usingDTable_internal_bmi2_asm(
|
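ZSTDLIB_HIDDEN marks the assembly loop symbols as internal to the library. A generic sketch of what such an annotation commonly expands to on GCC/Clang (an assumption about the pattern, not a quote of the bundled macro):

    /* Hide a symbol from the shared library's export table. */
    #if defined(__GNUC__) && (__GNUC__ >= 4)
    #  define MY_HIDDEN __attribute__((visibility("hidden")))
    #else
    #  define MY_HIDDEN
    #endif

    MY_HIDDEN void my_internal_asm_loop(void* args);   /* callable within the DSO only */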
data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S
CHANGED
@@ -1,14 +1,24 @@
-#include "../common/portability_macros.h"
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
 
-#if ZSTD_ENABLE_ASM_X86_64_BMI2
+#include "../common/portability_macros.h"
 
 /* Stack marking
  * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
  */
-#if defined(__linux__) && defined(__ELF__)
+#if defined(__ELF__) && defined(__GNUC__)
 .section .note.GNU-stack,"",%progbits
 #endif
 
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
 /* Calling convention:
  *
  * %rdi contains the first argument: HUF_DecompressAsmArgs*.
@@ -20,6 +30,10 @@
  * TODO: Support Windows calling convention.
  */
 
+ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop)
+ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop)
+ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop)
+ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop)
 .global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop
 .global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop
 .global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop
data/ext/zstdruby/libzstd/dictBuilder/zdict.c
CHANGED
@@ -682,8 +682,8 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
 
     if (nbSeq >= 2) { /* rep offsets */
         const seqDef* const seq = seqStorePtr->sequencesStart;
-        U32 offset1 = seq[0].offset - 3;
-        U32 offset2 = seq[1].offset - 3;
+        U32 offset1 = seq[0].offBase - ZSTD_REP_NUM;
+        U32 offset2 = seq[1].offBase - ZSTD_REP_NUM;
         if (offset1 >= MAXREPOFFSET) offset1 = 0;
         if (offset2 >= MAXREPOFFSET) offset2 = 0;
         repOffsets[offset1] += 3;
data/ext/zstdruby/libzstd/zstd.h
CHANGED
@@ -74,7 +74,7 @@ extern "C" {
 /*------  Version  ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    5
-#define ZSTD_VERSION_RELEASE  1
+#define ZSTD_VERSION_RELEASE  2
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 
 /*! ZSTD_versionNumber() :
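Bumping the release digit makes the composite constant 1*100*100 + 5*100 + 2 = 10502, which can be confirmed at runtime through the public API:

    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        printf("%u\n", ZSTD_versionNumber());   /* prints 10502 when linked against v1.5.2 */
        return 0;
    }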
data/ext/zstdruby/main.c
ADDED
@@ -0,0 +1,14 @@
+#include <common.h>
+VALUE rb_mZstd;
+void zstd_ruby_init(void);
+void zstd_ruby_streaming_compress_init(void);
+void zstd_ruby_streaming_decompress_init(void);
+
+void
+Init_zstdruby(void)
+{
+    rb_mZstd = rb_define_module("Zstd");
+    zstd_ruby_init();
+    zstd_ruby_streaming_compress_init();
+    zstd_ruby_streaming_decompress_init();
+}
data/ext/zstdruby/streaming_compress.c
ADDED
@@ -0,0 +1,185 @@
+#include <common.h>
+#include <streaming_compress.h>
+
+struct streaming_compress_t {
+    ZSTD_CCtx* ctx;
+    VALUE buf;
+    size_t buf_size;
+};
+
+static void
+streaming_compress_mark(void *p)
+{
+    struct streaming_compress_t *sc = p;
+    // rb_gc_mark((VALUE)sc->ctx);
+    rb_gc_mark(sc->buf);
+    rb_gc_mark(sc->buf_size);
+}
+
+static void
+streaming_compress_free(void *p)
+{
+    struct streaming_compress_t *sc = p;
+    ZSTD_CCtx* ctx = sc->ctx;
+    if (ctx != NULL) {
+        ZSTD_freeCCtx(ctx);
+    }
+    xfree(sc);
+}
+
+static size_t
+streaming_compress_memsize(const void *p)
+{
+    return sizeof(struct streaming_compress_t);
+}
+
+static const rb_data_type_t streaming_compress_type = {
+    "streaming_compress",
+    { streaming_compress_mark, streaming_compress_free, streaming_compress_memsize, },
+    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+static VALUE
+rb_streaming_compress_allocate(VALUE klass)
+{
+    struct streaming_compress_t* sc;
+    VALUE obj = TypedData_Make_Struct(klass, struct streaming_compress_t, &streaming_compress_type, sc);
+    sc->ctx = NULL;
+    sc->buf = Qnil;
+    sc->buf_size = 0;
+    return obj;
+}
+
+static VALUE
+rb_streaming_compress_initialize(int argc, VALUE *argv, VALUE obj)
+{
+    VALUE compression_level_value;
+    rb_scan_args(argc, argv, "01", &compression_level_value);
+
+    int compression_level;
+    if (NIL_P(compression_level_value)) {
+        compression_level = 0; // The default. See ZSTD_CLEVEL_DEFAULT in zstd_compress.c
+    } else {
+        compression_level = NUM2INT(compression_level_value);
+    }
+
+    struct streaming_compress_t* sc;
+    TypedData_Get_Struct(obj, struct streaming_compress_t, &streaming_compress_type, sc);
+    size_t const buffOutSize = ZSTD_CStreamOutSize();
+
+    ZSTD_CCtx* ctx = ZSTD_createCCtx();
+    if (ctx == NULL) {
+        rb_raise(rb_eRuntimeError, "%s", "ZSTD_createCCtx error");
+    }
+    ZSTD_CCtx_setParameter(ctx, ZSTD_c_compressionLevel, compression_level);
+    sc->ctx = ctx;
+    sc->buf = rb_str_new(NULL, buffOutSize);
+    sc->buf_size = buffOutSize;
+
+    return obj;
+}
+
+#define FIXNUMARG(val, ifnil) \
+    (NIL_P((val)) ? (ifnil) \
+    : (FIX2INT((val))))
+#define ARG_CONTINUE(val) FIXNUMARG((val), ZSTD_e_continue)
+
+static VALUE
+no_compress(struct streaming_compress_t* sc, ZSTD_EndDirective endOp)
+{
+    ZSTD_inBuffer input = { NULL, 0, 0 };
+    const char* output_data = RSTRING_PTR(sc->buf);
+    VALUE result = rb_str_new(0, 0);
+    size_t ret;
+    do {
+        ZSTD_outBuffer output = { (void*)output_data, sc->buf_size, 0 };
+
+        size_t const ret = ZSTD_compressStream2(sc->ctx, &output, &input, endOp);
+        if (ZSTD_isError(ret)) {
+            rb_raise(rb_eRuntimeError, "flush error error code: %s", ZSTD_getErrorName(ret));
+        }
+        rb_str_cat(result, output.dst, output.pos);
+    } while (ret > 0);
+    return result;
+}
+
+static VALUE
+rb_streaming_compress_compress(VALUE obj, VALUE src)
+{
+    StringValue(src);
+    const char* input_data = RSTRING_PTR(src);
+    size_t input_size = RSTRING_LEN(src);
+    ZSTD_inBuffer input = { input_data, input_size, 0 };
+
+    struct streaming_compress_t* sc;
+    TypedData_Get_Struct(obj, struct streaming_compress_t, &streaming_compress_type, sc);
+    const char* output_data = RSTRING_PTR(sc->buf);
+    VALUE result = rb_str_new(0, 0);
+    while (input.pos < input.size) {
+        ZSTD_outBuffer output = { (void*)output_data, sc->buf_size, 0 };
+        size_t const ret = ZSTD_compressStream2(sc->ctx, &output, &input, ZSTD_e_continue);
+        if (ZSTD_isError(ret)) {
+            rb_raise(rb_eRuntimeError, "compress error error code: %s", ZSTD_getErrorName(ret));
+        }
+        rb_str_cat(result, output.dst, output.pos);
+    }
+    return result;
+}
+
+static VALUE
+rb_streaming_compress_addstr(VALUE obj, VALUE src)
+{
+    StringValue(src);
+    const char* input_data = RSTRING_PTR(src);
+    size_t input_size = RSTRING_LEN(src);
+    ZSTD_inBuffer input = { input_data, input_size, 0 };
+
+    struct streaming_compress_t* sc;
+    TypedData_Get_Struct(obj, struct streaming_compress_t, &streaming_compress_type, sc);
+    const char* output_data = RSTRING_PTR(sc->buf);
+
+    while (input.pos < input.size) {
+        ZSTD_outBuffer output = { (void*)output_data, sc->buf_size, 0 };
+        size_t const result = ZSTD_compressStream2(sc->ctx, &output, &input, ZSTD_e_continue);
+        if (ZSTD_isError(result)) {
+            rb_raise(rb_eRuntimeError, "compress error error code: %s", ZSTD_getErrorName(result));
+        }
+    }
+    return obj;
+}
+
+static VALUE
+rb_streaming_compress_flush(VALUE obj)
+{
+    struct streaming_compress_t* sc;
+    TypedData_Get_Struct(obj, struct streaming_compress_t, &streaming_compress_type, sc);
+    VALUE result = no_compress(sc, ZSTD_e_flush);
+    return result;
+}
+
+static VALUE
+rb_streaming_compress_finish(VALUE obj)
+{
+    struct streaming_compress_t* sc;
+    TypedData_Get_Struct(obj, struct streaming_compress_t, &streaming_compress_type, sc);
+    VALUE result = no_compress(sc, ZSTD_e_end);
+    return result;
+}
+
+extern VALUE rb_mZstd, cStreamingCompress;
+void
+zstd_ruby_streaming_compress_init(void)
+{
+    VALUE cStreamingCompress = rb_define_class_under(rb_mZstd, "StreamingCompress", rb_cObject);
+    rb_define_alloc_func(cStreamingCompress, rb_streaming_compress_allocate);
+    rb_define_method(cStreamingCompress, "initialize", rb_streaming_compress_initialize, -1);
+    rb_define_method(cStreamingCompress, "compress", rb_streaming_compress_compress, 1);
+    rb_define_method(cStreamingCompress, "<<", rb_streaming_compress_addstr, 1);
+    rb_define_method(cStreamingCompress, "flush", rb_streaming_compress_flush, 0);
+    rb_define_method(cStreamingCompress, "finish", rb_streaming_compress_finish, 0);
+
+    rb_define_const(cStreamingCompress, "CONTINUE", INT2FIX(ZSTD_e_continue));
+    rb_define_const(cStreamingCompress, "FLUSH", INT2FIX(ZSTD_e_flush));
+    rb_define_const(cStreamingCompress, "END", INT2FIX(ZSTD_e_end));
+}
+
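The binding above wraps zstd's standard streaming loop: ZSTD_compressStream2 with ZSTD_e_continue while feeding input, then ZSTD_e_flush or ZSTD_e_end to drain. For reference, a minimal standalone C sketch of the same pattern (error handling trimmed; output buffer size arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        ZSTD_CCtx* const ctx = ZSTD_createCCtx();
        const char* const msg = "streaming example streaming example";
        ZSTD_inBuffer in = { msg, strlen(msg), 0 };
        char dst[128];
        size_t remaining;
        do {   /* same drain loop no_compress() runs with ZSTD_e_end */
            ZSTD_outBuffer out = { dst, sizeof(dst), 0 };
            remaining = ZSTD_compressStream2(ctx, &out, &in, ZSTD_e_end);
            fwrite(dst, 1, out.pos, stdout);
        } while (remaining != 0);
        ZSTD_freeCCtx(ctx);
        return 0;
    }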