vibe_zstd 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. checksums.yaml +7 -0
  2. data/.standard.yml +3 -0
  3. data/CHANGELOG.md +22 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +978 -0
  6. data/Rakefile +20 -0
  7. data/benchmark/README.md +198 -0
  8. data/benchmark/compression_levels.rb +99 -0
  9. data/benchmark/context_reuse.rb +174 -0
  10. data/benchmark/decompression_speed_by_level.rb +65 -0
  11. data/benchmark/dictionary_training.rb +182 -0
  12. data/benchmark/dictionary_usage.rb +121 -0
  13. data/benchmark/for_readme.rb +157 -0
  14. data/benchmark/generate_fixture.rb +82 -0
  15. data/benchmark/helpers.rb +237 -0
  16. data/benchmark/multithreading.rb +105 -0
  17. data/benchmark/run_all.rb +150 -0
  18. data/benchmark/streaming.rb +154 -0
  19. data/ext/vibe_zstd/Makefile +270 -0
  20. data/ext/vibe_zstd/cctx.c +565 -0
  21. data/ext/vibe_zstd/dctx.c +493 -0
  22. data/ext/vibe_zstd/dict.c +587 -0
  23. data/ext/vibe_zstd/extconf.rb +52 -0
  24. data/ext/vibe_zstd/frames.c +132 -0
  25. data/ext/vibe_zstd/libzstd/LICENSE +30 -0
  26. data/ext/vibe_zstd/libzstd/common/allocations.h +55 -0
  27. data/ext/vibe_zstd/libzstd/common/bits.h +205 -0
  28. data/ext/vibe_zstd/libzstd/common/bitstream.h +454 -0
  29. data/ext/vibe_zstd/libzstd/common/compiler.h +464 -0
  30. data/ext/vibe_zstd/libzstd/common/cpu.h +249 -0
  31. data/ext/vibe_zstd/libzstd/common/debug.c +30 -0
  32. data/ext/vibe_zstd/libzstd/common/debug.h +107 -0
  33. data/ext/vibe_zstd/libzstd/common/entropy_common.c +340 -0
  34. data/ext/vibe_zstd/libzstd/common/error_private.c +64 -0
  35. data/ext/vibe_zstd/libzstd/common/error_private.h +158 -0
  36. data/ext/vibe_zstd/libzstd/common/fse.h +625 -0
  37. data/ext/vibe_zstd/libzstd/common/fse_decompress.c +315 -0
  38. data/ext/vibe_zstd/libzstd/common/huf.h +277 -0
  39. data/ext/vibe_zstd/libzstd/common/mem.h +422 -0
  40. data/ext/vibe_zstd/libzstd/common/pool.c +371 -0
  41. data/ext/vibe_zstd/libzstd/common/pool.h +81 -0
  42. data/ext/vibe_zstd/libzstd/common/portability_macros.h +171 -0
  43. data/ext/vibe_zstd/libzstd/common/threading.c +182 -0
  44. data/ext/vibe_zstd/libzstd/common/threading.h +142 -0
  45. data/ext/vibe_zstd/libzstd/common/xxhash.c +18 -0
  46. data/ext/vibe_zstd/libzstd/common/xxhash.h +7094 -0
  47. data/ext/vibe_zstd/libzstd/common/zstd_common.c +48 -0
  48. data/ext/vibe_zstd/libzstd/common/zstd_deps.h +123 -0
  49. data/ext/vibe_zstd/libzstd/common/zstd_internal.h +324 -0
  50. data/ext/vibe_zstd/libzstd/common/zstd_trace.h +156 -0
  51. data/ext/vibe_zstd/libzstd/compress/clevels.h +134 -0
  52. data/ext/vibe_zstd/libzstd/compress/fse_compress.c +625 -0
  53. data/ext/vibe_zstd/libzstd/compress/hist.c +191 -0
  54. data/ext/vibe_zstd/libzstd/compress/hist.h +82 -0
  55. data/ext/vibe_zstd/libzstd/compress/huf_compress.c +1464 -0
  56. data/ext/vibe_zstd/libzstd/compress/zstd_compress.c +7843 -0
  57. data/ext/vibe_zstd/libzstd/compress/zstd_compress_internal.h +1636 -0
  58. data/ext/vibe_zstd/libzstd/compress/zstd_compress_literals.c +235 -0
  59. data/ext/vibe_zstd/libzstd/compress/zstd_compress_literals.h +39 -0
  60. data/ext/vibe_zstd/libzstd/compress/zstd_compress_sequences.c +442 -0
  61. data/ext/vibe_zstd/libzstd/compress/zstd_compress_sequences.h +55 -0
  62. data/ext/vibe_zstd/libzstd/compress/zstd_compress_superblock.c +688 -0
  63. data/ext/vibe_zstd/libzstd/compress/zstd_compress_superblock.h +32 -0
  64. data/ext/vibe_zstd/libzstd/compress/zstd_cwksp.h +765 -0
  65. data/ext/vibe_zstd/libzstd/compress/zstd_double_fast.c +778 -0
  66. data/ext/vibe_zstd/libzstd/compress/zstd_double_fast.h +42 -0
  67. data/ext/vibe_zstd/libzstd/compress/zstd_fast.c +985 -0
  68. data/ext/vibe_zstd/libzstd/compress/zstd_fast.h +30 -0
  69. data/ext/vibe_zstd/libzstd/compress/zstd_lazy.c +2199 -0
  70. data/ext/vibe_zstd/libzstd/compress/zstd_lazy.h +193 -0
  71. data/ext/vibe_zstd/libzstd/compress/zstd_ldm.c +745 -0
  72. data/ext/vibe_zstd/libzstd/compress/zstd_ldm.h +109 -0
  73. data/ext/vibe_zstd/libzstd/compress/zstd_ldm_geartab.h +106 -0
  74. data/ext/vibe_zstd/libzstd/compress/zstd_opt.c +1580 -0
  75. data/ext/vibe_zstd/libzstd/compress/zstd_opt.h +72 -0
  76. data/ext/vibe_zstd/libzstd/compress/zstd_preSplit.c +238 -0
  77. data/ext/vibe_zstd/libzstd/compress/zstd_preSplit.h +33 -0
  78. data/ext/vibe_zstd/libzstd/compress/zstdmt_compress.c +1923 -0
  79. data/ext/vibe_zstd/libzstd/compress/zstdmt_compress.h +102 -0
  80. data/ext/vibe_zstd/libzstd/decompress/huf_decompress.c +1944 -0
  81. data/ext/vibe_zstd/libzstd/decompress/huf_decompress_amd64.S +602 -0
  82. data/ext/vibe_zstd/libzstd/decompress/zstd_ddict.c +244 -0
  83. data/ext/vibe_zstd/libzstd/decompress/zstd_ddict.h +44 -0
  84. data/ext/vibe_zstd/libzstd/decompress/zstd_decompress.c +2410 -0
  85. data/ext/vibe_zstd/libzstd/decompress/zstd_decompress_block.c +2209 -0
  86. data/ext/vibe_zstd/libzstd/decompress/zstd_decompress_block.h +73 -0
  87. data/ext/vibe_zstd/libzstd/decompress/zstd_decompress_internal.h +240 -0
  88. data/ext/vibe_zstd/libzstd/deprecated/zbuff.h +214 -0
  89. data/ext/vibe_zstd/libzstd/deprecated/zbuff_common.c +26 -0
  90. data/ext/vibe_zstd/libzstd/deprecated/zbuff_compress.c +167 -0
  91. data/ext/vibe_zstd/libzstd/deprecated/zbuff_decompress.c +77 -0
  92. data/ext/vibe_zstd/libzstd/dictBuilder/cover.c +1302 -0
  93. data/ext/vibe_zstd/libzstd/dictBuilder/cover.h +152 -0
  94. data/ext/vibe_zstd/libzstd/dictBuilder/divsufsort.c +1913 -0
  95. data/ext/vibe_zstd/libzstd/dictBuilder/divsufsort.h +57 -0
  96. data/ext/vibe_zstd/libzstd/dictBuilder/fastcover.c +766 -0
  97. data/ext/vibe_zstd/libzstd/dictBuilder/zdict.c +1133 -0
  98. data/ext/vibe_zstd/libzstd/zdict.h +481 -0
  99. data/ext/vibe_zstd/libzstd/zstd.h +3198 -0
  100. data/ext/vibe_zstd/libzstd/zstd_errors.h +107 -0
  101. data/ext/vibe_zstd/streaming.c +410 -0
  102. data/ext/vibe_zstd/vibe_zstd.c +293 -0
  103. data/ext/vibe_zstd/vibe_zstd.h +56 -0
  104. data/ext/vibe_zstd/vibe_zstd_internal.h +27 -0
  105. data/lib/vibe_zstd/constants.rb +67 -0
  106. data/lib/vibe_zstd/version.rb +5 -0
  107. data/lib/vibe_zstd.rb +255 -0
  108. data/sig/vibe_zstd.rbs +76 -0
  109. metadata +179 -0
@@ -0,0 +1,72 @@
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under both the BSD-style license (found in the
6
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
+ * in the COPYING file in the root directory of this source tree).
8
+ * You may select, at your option, one of the above-listed licenses.
9
+ */
10
+
11
#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H

#include "zstd_compress_internal.h"

/* Declarations for the binary-tree "optimal parsing" block compressors
 * (btopt, btultra, btultra2). Each family can be compiled out via its
 * ZSTD_EXCLUDE_*_BLOCK_COMPRESSOR macro, in which case the corresponding
 * ZSTD_COMPRESSBLOCK_* dispatch macro resolves to NULL. */

#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
/* used in ZSTD_loadDictionaryContent() */
void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend);
#endif

#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
/* btopt strategy, for the in-prefix, dictMatchState and extDict match modes */
size_t ZSTD_compressBlock_btopt(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState
#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict
#else
#define ZSTD_COMPRESSBLOCK_BTOPT NULL
#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL
#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL
#endif

#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
/* btultra strategy, for the in-prefix, dictMatchState and extDict match modes */
size_t ZSTD_compressBlock_btultra(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and is only specific for the first block (no prefix) */
size_t ZSTD_compressBlock_btultra2(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState
#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict
#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2
#else
#define ZSTD_COMPRESSBLOCK_BTULTRA NULL
#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL
#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL
#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
#endif

#endif /* ZSTD_OPT_H */
@@ -0,0 +1,238 @@
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under both the BSD-style license (found in the
6
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
+ * in the COPYING file in the root directory of this source tree).
8
+ * You may select, at your option, one of the above-listed licenses.
9
+ */
10
+
11
+ #include "../common/compiler.h" /* ZSTD_ALIGNOF */
12
+ #include "../common/mem.h" /* S64 */
13
+ #include "../common/zstd_deps.h" /* ZSTD_memset */
14
+ #include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */
15
+ #include "hist.h" /* HIST_add */
16
+ #include "zstd_preSplit.h"
17
+
18
+
19
/* Tuning constants for the block-splitting heuristic. */
#define BLOCKSIZE_MIN 3500          /* not referenced in this file; presumably a caller-side minimum split size — TODO confirm */
#define THRESHOLD_PENALTY_RATE 16   /* denominator of the split-decision threshold fraction */
#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)  /* base numerator of the threshold */
#define THRESHOLD_PENALTY 3         /* initial extra numerator; decays by 1 per merged chunk */

#define HASHLENGTH 2                /* nb of bytes read per sampled event */
#define HASHLOG_MAX 10              /* largest fingerprint table log-size supported */
#define HASHTABLESIZE (1 << HASHLOG_MAX)
#define HASHMASK (HASHTABLESIZE - 1)
#define KNUTH 0x9e3779b9            /* Knuth multiplicative-hash constant (2^32 / golden ratio) */
29
+
30
+ /* for hashLog > 8, hash 2 bytes.
31
+ * for hashLog == 8, just take the byte, no hashing.
32
+ * The speed of this method relies on compile-time constant propagation */
33
+ FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog)
34
+ {
35
+ assert(hashLog >= 8);
36
+ if (hashLog == 8) return (U32)((const BYTE*)p)[0];
37
+ assert(hashLog <= HASHLOG_MAX);
38
+ return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog);
39
+ }
40
+
41
+
42
/* A Fingerprint is a histogram of hashed events observed over a data segment,
 * plus the total number of events sampled into it. */
typedef struct {
    unsigned events[HASHTABLESIZE];
    size_t nbEvents;
} Fingerprint;
/* Rolling statistics: fingerprint of data accepted so far vs the latest chunk. */
typedef struct {
    Fingerprint pastEvents;
    Fingerprint newEvents;
} FPStats;
50
+
51
+ static void initStats(FPStats* fpstats)
52
+ {
53
+ ZSTD_memset(fpstats, 0, sizeof(FPStats));
54
+ }
55
+
56
/* addEvents_generic():
 * Sample one HASHLENGTH-byte event every @samplingRate bytes of @src
 * and accumulate the hashed events into @fp.
 * Compile-time-constant @samplingRate and @hashLog let this fully specialize. */
FORCE_INLINE_TEMPLATE void
addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
{
    const char* p = (const char*)src;
    size_t limit = srcSize - HASHLENGTH + 1;
    size_t n;
    assert(srcSize >= HASHLENGTH);
    for (n = 0; n < limit; n+=samplingRate) {
        fp->events[hash2(p+n, hashLog)]++;
    }
    /* note: floor division slightly under-counts when limit % samplingRate != 0
     * (the loop records ceil(limit/samplingRate) events); harmless for a heuristic */
    fp->nbEvents += limit/samplingRate;
}
68
+
69
/* recordFingerprint_generic():
 * Reset @fp, then record events from @src into it.
 * Only the first (1 << hashLog) counters are cleared, since hash2()
 * cannot produce larger indices at this @hashLog. */
FORCE_INLINE_TEMPLATE void
recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
{
    ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog));
    fp->nbEvents = 0;
    addEvents_generic(fp, src, srcSize, samplingRate, hashLog);
}
76
+
77
/* Function-pointer type for the specialized fingerprint recorders below. */
typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize);

/* Name of the recorder specialized for sampling rate @_rate */
#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate

/* Generate a recorder with compile-time-constant sampling rate and hash log,
 * so recordFingerprint_generic() specializes fully. */
#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize)                                 \
    static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
    {                                                                              \
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
    }

ZSTD_GEN_RECORD_FINGERPRINT(1, 10)   /* densest sampling, hashLog 10 */
ZSTD_GEN_RECORD_FINGERPRINT(5, 10)
ZSTD_GEN_RECORD_FINGERPRINT(11, 9)
ZSTD_GEN_RECORD_FINGERPRINT(43, 8)   /* sparsest sampling, raw-byte "hash" */
91
+
92
+
93
+ static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); }
94
+
95
/* fpDistance():
 * Cross-scaled L1 distance between two fingerprints.
 * Each histogram bin is multiplied by the other fingerprint's total event
 * count, which compares relative frequencies without any division, so
 * fingerprints built from different sample counts remain comparable.
 * The result is implicitly scaled by (fp1->nbEvents * fp2->nbEvents). */
static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog)
{
    U64 distance = 0;
    size_t n;
    assert(hashLog <= HASHLOG_MAX);
    for (n = 0; n < ((size_t)1 << hashLog); n++) {
        distance +=
            abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents);
    }
    return distance;
}
106
+
107
/* Compare newEvents with pastEvents
 * return 1 when considered "too different"
 */
static int compareFingerprints(const Fingerprint* ref,
                        const Fingerprint* newfp,
                        int penalty,
                        unsigned hashLog)
{
    assert(ref->nbEvents > 0);
    assert(newfp->nbEvents > 0);
    /* p50 is the normalization factor that fpDistance() is implicitly scaled
     * by, so the comparison is effectively:
     *   relativeDistance >= (THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE */
    {   U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents;
        U64 deviation = fpDistance(ref, newfp, hashLog);
        U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE;
        return deviation >= threshold;
    }
}
123
+
124
+ static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp)
125
+ {
126
+ size_t n;
127
+ for (n = 0; n < HASHTABLESIZE; n++) {
128
+ acc->events[n] += newfp->events[n];
129
+ }
130
+ acc->nbEvents += newfp->nbEvents;
131
+ }
132
+
133
+ static void flushEvents(FPStats* fpstats)
134
+ {
135
+ size_t n;
136
+ for (n = 0; n < HASHTABLESIZE; n++) {
137
+ fpstats->pastEvents.events[n] = fpstats->newEvents.events[n];
138
+ }
139
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents;
140
+ ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents));
141
+ }
142
+
143
+ static void removeEvents(Fingerprint* acc, const Fingerprint* slice)
144
+ {
145
+ size_t n;
146
+ for (n = 0; n < HASHTABLESIZE; n++) {
147
+ assert(acc->events[n] >= slice->events[n]);
148
+ acc->events[n] -= slice->events[n];
149
+ }
150
+ acc->nbEvents -= slice->nbEvents;
151
+ }
152
+
153
#define CHUNKSIZE (8 << 10)
/* ZSTD_splitBlock_byChunks():
 * Scan a 128 KB block in 8 KB chunks, comparing each chunk's fingerprint
 * against the accumulated fingerprint of all preceding chunks.
 * @level (0-3) selects the sampling rate and hash size: higher levels sample
 * more densely for better accuracy at more cost.
 * @workspace must hold an FPStats and be aligned for it.
 * @return : offset of the first detected boundary, or blockSize if none found.
 */
static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize,
                        int level,
                        void* workspace, size_t wkspSize)
{
    static const RecordEvents_f records_fs[] = {
        FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1)
    };
    static const unsigned hashParams[] = { 8, 9, 10, 10 };
    const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]);
    FPStats* const fpstats = (FPStats*)workspace;
    const char* p = (const char*)blockStart;
    int penalty = THRESHOLD_PENALTY;
    size_t pos = 0;
    assert(blockSize == (128 << 10));  /* only full 128 KB blocks are supported for now */
    assert(workspace != NULL);
    assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
    ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
    assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;

    initStats(fpstats);
    record_f(&fpstats->pastEvents, p, CHUNKSIZE);
    for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) {
        /* record_f() resets newEvents before recording, so no explicit clear is needed */
        record_f(&fpstats->newEvents, p + pos, CHUNKSIZE);
        if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) {
            return pos;
        } else {
            mergeEvents(&fpstats->pastEvents, &fpstats->newEvents);
            /* lower the split threshold as evidence accumulates */
            if (penalty > 0) penalty--;
        }
    }
    assert(pos == blockSize);
    return blockSize;
    /* unreachable: referenced only to silence unused-function warnings */
    (void)flushEvents; (void)removeEvents;
}
188
+
189
/* ZSTD_splitBlock_fromBorders(): very fast strategy :
 * compare fingerprint from beginning and end of the block,
 * derive from their difference if it's preferable to split in the middle,
 * repeat the process a second time, for finer grained decision.
 * Repeating a 3rd time did not bring improvements, so it stops at 2.
 * Benefits are good enough for a cheap heuristic.
 * More accurate splitting saves more, but speed impact is also more perceptible.
 * For better accuracy, use more elaborate variant *_byChunks.
 */
static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize,
                        void* workspace, size_t wkspSize)
{
#define SEGMENT_SIZE 512
    FPStats* const fpstats = (FPStats*)workspace;
    /* NOTE(review): middleEvents is carved out of the workspace at offset
     * 512*sizeof(unsigned); at hashLog 8 only the first 256 counters of each
     * fingerprint are used, so this appears to alias only unused regions of
     * fpstats — confirm before changing any fingerprint layout */
    Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned));
    assert(blockSize == (128 << 10));  /* only full 128 KB blocks are supported for now */
    assert(workspace != NULL);
    assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
    ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
    assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;

    initStats(fpstats);
    /* fingerprint the first and last SEGMENT_SIZE bytes of the block */
    HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE);
    HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE);
    fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE;
    if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8))
        return blockSize;  /* borders look alike: do not split */

    /* borders differ: fingerprint the middle to decide which side the
     * transition is closer to */
    HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE);
    middleEvents->nbEvents = SEGMENT_SIZE;
    {   U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8);
        U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8);
        U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3;
        if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance)
            return 64 KB;  /* middle is ambiguous: split at the center */
        /* split at the quarter closer to the transition */
        return (distFromBegin > distFromEnd) ? 32 KB : 96 KB;
    }
}
227
+
228
/* ZSTD_splitBlock():
 * Public entry point: dispatch to the cheap border heuristic at level 0,
 * or to the chunk-scanning variant for levels 1-4 (mapped to accuracy 0-3). */
size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
                    int level,
                    void* workspace, size_t wkspSize)
{
    DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level);
    assert(0<=level && level<=4);
    if (level >= 1) {
        return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize);
    }
    /* level == 0 */
    return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize);
}
@@ -0,0 +1,33 @@
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under both the BSD-style license (found in the
6
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
+ * in the COPYING file in the root directory of this source tree).
8
+ * You may select, at your option, one of the above-listed licenses.
9
+ */
10
+
11
#ifndef ZSTD_PRESPLIT_H
#define ZSTD_PRESPLIT_H

#include <stddef.h>   /* size_t */

/* NOTE: the "SLIPBLOCK" spelling is historical; it is kept as-is because it
 * is part of the published interface. 8208 bytes corresponds to two
 * fingerprints of 1024 32-bit counters plus a size_t each (sizeof(FPStats)
 * in zstd_preSplit.c). */
#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208

/* ZSTD_splitBlock():
 * @level must be a value between 0 and 4.
 *        higher levels spend more energy to detect block boundaries.
 * @workspace must be aligned for size_t.
 * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE
 * @return : offset at which to split the block, or blockSize for no split.
 * note:
 * For the time being, this function only accepts full 128 KB blocks.
 * Therefore, @blockSize must be == 128 KB.
 * While this could be extended to smaller sizes in the future,
 * it is not yet clear if this would be useful. TBD.
 */
size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
                    int level,
                    void* workspace, size_t wkspSize);

#endif /* ZSTD_PRESPLIT_H */