zstd-ruby 1.5.1.1 → 1.5.5.0
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +78 -5
- data/Rakefile +8 -2
- data/ext/zstdruby/common.h +15 -0
- data/ext/zstdruby/extconf.rb +1 -1
- data/ext/zstdruby/libzstd/common/allocations.h +55 -0
- data/ext/zstdruby/libzstd/common/bits.h +200 -0
- data/ext/zstdruby/libzstd/common/bitstream.h +19 -60
- data/ext/zstdruby/libzstd/common/compiler.h +26 -3
- data/ext/zstdruby/libzstd/common/cpu.h +1 -1
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +1 -1
- data/ext/zstdruby/libzstd/common/entropy_common.c +12 -40
- data/ext/zstdruby/libzstd/common/error_private.c +9 -2
- data/ext/zstdruby/libzstd/common/error_private.h +1 -1
- data/ext/zstdruby/libzstd/common/fse.h +5 -83
- data/ext/zstdruby/libzstd/common/fse_decompress.c +7 -99
- data/ext/zstdruby/libzstd/common/huf.h +65 -156
- data/ext/zstdruby/libzstd/common/mem.h +39 -46
- data/ext/zstdruby/libzstd/common/pool.c +37 -16
- data/ext/zstdruby/libzstd/common/pool.h +9 -3
- data/ext/zstdruby/libzstd/common/portability_macros.h +28 -3
- data/ext/zstdruby/libzstd/common/threading.c +68 -14
- data/ext/zstdruby/libzstd/common/threading.h +5 -10
- data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
- data/ext/zstdruby/libzstd/common/xxhash.h +8 -8
- data/ext/zstdruby/libzstd/common/zstd_common.c +1 -36
- data/ext/zstdruby/libzstd/common/zstd_deps.h +1 -1
- data/ext/zstdruby/libzstd/common/zstd_internal.h +20 -122
- data/ext/zstdruby/libzstd/common/zstd_trace.h +3 -3
- data/ext/zstdruby/libzstd/compress/clevels.h +1 -1
- data/ext/zstdruby/libzstd/compress/fse_compress.c +7 -124
- data/ext/zstdruby/libzstd/compress/hist.c +1 -1
- data/ext/zstdruby/libzstd/compress/hist.h +1 -1
- data/ext/zstdruby/libzstd/compress/huf_compress.c +234 -169
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +1317 -594
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +272 -165
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +115 -39
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -8
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +13 -13
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +162 -82
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +95 -33
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +434 -149
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +405 -348
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +4 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +9 -7
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +149 -100
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +32 -16
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +5 -2
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +434 -441
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +42 -37
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +1 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +205 -80
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +201 -81
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +6 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +4 -2
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +19 -15
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +13 -91
- data/ext/zstdruby/libzstd/zdict.h +53 -31
- data/ext/zstdruby/libzstd/zstd.h +580 -135
- data/ext/zstdruby/libzstd/zstd_errors.h +27 -8
- data/ext/zstdruby/main.c +20 -0
- data/ext/zstdruby/skippable_frame.c +63 -0
- data/ext/zstdruby/streaming_compress.c +177 -0
- data/ext/zstdruby/streaming_compress.h +5 -0
- data/ext/zstdruby/streaming_decompress.c +123 -0
- data/ext/zstdruby/zstdruby.c +113 -31
- data/lib/zstd-ruby/version.rb +1 -1
- data/lib/zstd-ruby.rb +0 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +11 -37
- data/.github/dependabot.yml +0 -8
- data/.github/workflows/ruby.yml +0 -35
- data/ext/zstdruby/libzstd/.gitignore +0 -3
- data/ext/zstdruby/libzstd/BUCK +0 -232
- data/ext/zstdruby/libzstd/Makefile +0 -357
- data/ext/zstdruby/libzstd/README.md +0 -217
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -167
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
- data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
- data/ext/zstdruby/libzstd/dll/example/README.md +0 -63
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
- data/ext/zstdruby/libzstd/libzstd.mk +0 -185
- data/ext/zstdruby/libzstd/libzstd.pc.in +0 -16
- data/ext/zstdruby/libzstd/modulemap/module.modulemap +0 -4
- data/ext/zstdruby/zstdruby.h +0 -6
data/ext/zstdruby/libzstd/common/huf.h:

```diff
--- a/data/ext/zstdruby/libzstd/common/huf.h
+++ b/data/ext/zstdruby/libzstd/common/huf.h
@@ -1,7 +1,7 @@
 /* ******************************************************************
  * huff0 huffman codec,
  * part of Finite State Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  *
  * You can contact the author at :
  * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
```
```diff
@@ -21,99 +21,22 @@ extern "C" {
 
 /* *** Dependencies *** */
 #include "zstd_deps.h"    /* size_t */
-
-
-
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- * HUF symbols remain "private" (internal symbols for library only).
- * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
-#  define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-#  define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* ***  simple functions  *** */
-/* ========================== */
-
-/** HUF_compress() :
- *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- *  'dst' buffer must be already allocated.
- *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- *  `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
-
-/** HUF_decompress() :
- *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- *  into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- *  Note : in contrast with FSE, HUF_decompress can regenerate
- *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- *         because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- *           or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
-                               const void* cSrc, size_t cSrcSize);
+#include "mem.h"    /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
 
 
 /* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024)
-
+#define HUF_BLOCKSIZE_MAX (128 * 1024)   /**< maximum input size for a single block compressed with HUF_compress */
+size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
 
 /* Error Management */
-
-
+unsigned    HUF_isError(size_t code);        /**< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */
 
 
-/* ***   Advanced function   *** */
-
-/** HUF_compress2() :
- *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                               unsigned maxSymbolValue, unsigned tableLog);
-
-/** HUF_compress4X_wksp() :
- *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
- * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
 #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
 #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     unsigned maxSymbolValue, unsigned tableLog,
-                                     void* workSpace, size_t wkspSize);
-
-#endif   /* HUF_H_298734234 */
-
-/* ******************************************************************
- *  WARNING !!
- *  The following section contains advanced and experimental definitions
- *  which shall never be used in the context of a dynamic library,
- *  because they are not guaranteed to remain stable in the future.
- *  Only consider them in association with static linking.
- * *****************************************************************/
-#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
-
-/* *** Dependencies *** */
-#include "mem.h"   /* U32 */
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-
 
 /* *** Constants *** */
 #define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
```
```diff
@@ -154,25 +77,49 @@ typedef U32 HUF_DTable;
 /* ****************************************
  *  Advanced decompression functions
  ******************************************/
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-#endif
 
-
-
-
-
-
-
-
-
-
+/**
+ * Huffman flags bitset.
+ * For all flags, 0 is the default value.
+ */
+typedef enum {
+    /**
+     * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
+     * Otherwise: Ignored.
+     */
+    HUF_flags_bmi2 = (1 << 0),
+    /**
+     * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
+     * If unset: Use heuristic to find the table depth.
+     */
+    HUF_flags_optimalDepth = (1 << 1),
+    /**
+     * If set: If the previous table can encode the input, always reuse the previous table.
+     * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
+     */
+    HUF_flags_preferRepeat = (1 << 2),
+    /**
+     * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
+     * If unset: Always histogram the entire input.
+     */
+    HUF_flags_suspectUncompressible = (1 << 3),
+    /**
+     * If set: Don't use assembly implementations
+     * If unset: Allow using assembly implementations
+     */
+    HUF_flags_disableAsm = (1 << 4),
+    /**
+     * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
+     * If unset: Use the fast decoding loop when possible.
+     */
+    HUF_flags_disableFast = (1 << 5)
+} HUF_flags_e;
 
 
 /* ****************************************
  *  HUF detailed API
  * ****************************************/
+#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra
 
 /*! HUF_compress() does the following:
  *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
```
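In 1.5.5 the separate `bmi2` / `preferRepeat` / `suspectUncompressible` arguments of the 1.5.1 API are folded into this single `HUF_flags_e` bitset. A minimal sketch of how such a flags word is composed (illustration only, not code from the gem; `make_huf_flags` is a hypothetical helper, while `ZSTD_cpuid()` / `ZSTD_cpuid_bmi2()` are real helpers from the bundled `common/cpu.h`):

```c
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi2() */
#include "huf.h"   /* HUF_flags_e */

/* Each HUF_flags_e value occupies its own bit, so flags combine with bitwise OR. */
static int make_huf_flags(void)
{
    int flags = 0;
    if (ZSTD_cpuid_bmi2(ZSTD_cpuid()))         /* runtime CPU feature detection */
        flags |= HUF_flags_bmi2;
    flags |= HUF_flags_suspectUncompressible;  /* sample input before full histogram */
    return flags;
}
```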
```diff
@@ -185,12 +132,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
  *  For example, it's possible to compress several blocks using the same 'CTable',
  *  or to save and regenerate 'CTable' using external methods.
  */
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-
-
+unsigned HUF_minTableLog(unsigned symbolCardinality);
+unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
+                             size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
 size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
-size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 
```
```diff
@@ -199,6 +146,7 @@ typedef enum {
     HUF_repeat_check,  /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
     HUF_repeat_valid   /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
+
 /** HUF_compress4X_repeat() :
  *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
  *  If it uses hufTable it does not modify hufTable or repeat.
```
```diff
@@ -209,13 +157,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              unsigned maxSymbolValue, unsigned tableLog,
                              void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                             HUF_CElt* hufTable, HUF_repeat* repeat, int
+                             HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
 
 /** HUF_buildCTable_wksp() :
  *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
  * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
  */
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
 #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
 size_t HUF_buildCTable_wksp (HUF_CElt* tree,
                              const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
@@ -241,7 +189,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
                           U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                           const void* src, size_t srcSize,
                           void* workspace, size_t wkspSize,
-                          int bmi2);
+                          int flags);
 
 /** HUF_readCTable() :
  *  Loading a CTable saved with HUF_writeCTable() */
```
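For reference, a sketch of how a caller sizes the scratch buffer with the workspace macros above (the wrapper is illustrative, not from the gem; `HUF_buildCTable_wksp()` and the alignment/size requirements are from the bundled header):

```c
#include "huf.h"

/* HUF_buildCTable_wksp() needs a 4-byte-aligned scratch buffer of at least
 * HUF_CTABLE_WORKSPACE_SIZE bytes; the _U32 macro expresses it in unsigneds. */
static size_t build_ctable(HUF_CElt* tree, const unsigned* count,
                           unsigned maxSymbolValue, unsigned maxNbBits)
{
    unsigned wksp[HUF_CTABLE_WORKSPACE_SIZE_U32];
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits,
                                wksp, sizeof(wksp));
}
```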
```diff
@@ -279,32 +227,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
 #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
 #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
 
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-
 
 /* ====================== */
 /* single stream variants */
 /* ====================== */
 
-size_t
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
-size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
 /** HUF_compress1X_repeat() :
  *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
  *  If it uses hufTable it does not modify hufTable or repeat.
```
```diff
@@ -315,49 +243,30 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              unsigned maxSymbolValue, unsigned tableLog,
                              void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                             HUF_CElt* hufTable, HUF_repeat* repeat, int
-
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
-#endif
-
-size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
-#endif
+                             HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
 
-size_t
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
 #ifndef HUF_FORCE_DECOMPRESS_X1
-size_t
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);   /**< double-symbols decoder */
 #endif
 
 /* BMI2 variants.
  * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
  */
-size_t
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
 #ifndef HUF_FORCE_DECOMPRESS_X2
-size_t
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
 #endif
-size_t
-size_t
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
 #ifndef HUF_FORCE_DECOMPRESS_X2
-size_t
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
 #endif
 #ifndef HUF_FORCE_DECOMPRESS_X1
-size_t
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
 #endif
 
-#endif
+#endif   /* HUF_H_298734234 */
 
 #if defined (__cplusplus)
 }
```
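Taken together, the huf.h changes collapse each `_bmi2` twin into a single entry point taking `int flags`. A hypothetical call-site migration (the wrapper and its identifiers are placeholders, not from the gem):

```c
#include "huf.h"

static size_t compress_one_stream(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  const HUF_CElt* ct, int cpuHasBmi2)
{
    /* 1.5.1:  HUF_compress1X_usingCTable_bmi2(dst, dstCapacity, src, srcSize, ct, cpuHasBmi2); */
    return HUF_compress1X_usingCTable(dst, dstCapacity, src, srcSize, ct,
                                      cpuHasBmi2 ? HUF_flags_bmi2 : 0);
}
```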
data/ext/zstdruby/libzstd/common/mem.h:

```diff
--- a/data/ext/zstdruby/libzstd/common/mem.h
+++ b/data/ext/zstdruby/libzstd/common/mem.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
```
```diff
@@ -133,21 +133,15 @@ MEM_STATIC size_t MEM_swapST(size_t in);
 /*-**************************************************************
 *  Memory I/O Implementation
 *****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- *
- *
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
+ * Method 0 : always use `memcpy()`. Safe and portable.
+ * Method 1 : Use compiler extension to set unaligned access.
  * Method 2 : direct access. This method is portable but violate C standard.
  *            It can generate buggy code on targets depending on alignment.
- *
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
+ * Default : method 1 if supported, else method 0
  */
 #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#
+#  ifdef __GNUC__
 #    define MEM_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
```
```diff
@@ -190,30 +184,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
 
 #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
 
-
-
-
-
-typedef struct { U16 v; } unalign16;
-typedef struct { U32 v; } unalign32;
-typedef struct { U64 v; } unalign64;
-typedef struct { size_t v; } unalignArch;
-__pragma( pack(pop) )
-#else
-typedef struct { U16 v; } __attribute__((packed)) unalign16;
-typedef struct { U32 v; } __attribute__((packed)) unalign32;
-typedef struct { U64 v; } __attribute__((packed)) unalign64;
-typedef struct { size_t v; } __attribute__((packed)) unalignArch;
-#endif
+typedef __attribute__((aligned(1))) U16 unalign16;
+typedef __attribute__((aligned(1))) U32 unalign32;
+typedef __attribute__((aligned(1))) U64 unalign64;
+typedef __attribute__((aligned(1))) size_t unalignArch;
 
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
-MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }
 
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }
 
 #else
 
```
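The rewritten comment and typedefs change Method 1 from packed structs to `__attribute__((aligned(1)))` scalars. A standalone sketch of Method 0 versus Method 1 on standard types (function and typedef names are mine, not libzstd's):

```c
#include <stdint.h>
#include <string.h>

/* Method 0: memcpy(), always safe and portable; compilers lower it to one load. */
static uint32_t read32_method0(const void* ptr)
{
    uint32_t v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}

#if defined(__GNUC__) || defined(__clang__)
/* Method 1: a typedef whose alignment requirement is lowered to 1 byte,
 * so dereferencing it at any address is well-defined for the compiler. */
typedef __attribute__((aligned(1))) uint32_t unaligned_u32;
static uint32_t read32_method1(const void* ptr) { return *(const unaligned_u32*)ptr; }
#endif
```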
```diff
@@ -257,6 +240,14 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value)
 
 #endif /* MEM_FORCE_MEMORY_ACCESS */
 
+MEM_STATIC U32 MEM_swap32_fallback(U32 in)
+{
+    return ((in << 24) & 0xff000000 ) |
+           ((in <<  8) & 0x00ff0000 ) |
+           ((in >>  8) & 0x0000ff00 ) |
+           ((in >> 24) & 0x000000ff );
+}
+
 MEM_STATIC U32 MEM_swap32(U32 in)
 {
 #if defined(_MSC_VER)     /* Visual Studio */
```
```diff
@@ -265,22 +256,13 @@ MEM_STATIC U32 MEM_swap32(U32 in)
   || (defined(__clang__) && __has_builtin(__builtin_bswap32))
     return __builtin_bswap32(in);
 #else
-    return ((in << 24) & 0xff000000 ) |
-           ((in <<  8) & 0x00ff0000 ) |
-           ((in >>  8) & 0x0000ff00 ) |
-           ((in >> 24) & 0x000000ff );
+    return MEM_swap32_fallback(in);
 #endif
 }
 
-MEM_STATIC U64 MEM_swap64(U64 in)
+MEM_STATIC U64 MEM_swap64_fallback(U64 in)
 {
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_uint64(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
-    return __builtin_bswap64(in);
-#else
-    return ((in << 56) & 0xff00000000000000ULL) |
+    return ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
```
```diff
@@ -288,6 +270,17 @@ MEM_STATIC U64 MEM_swap64(U64 in)
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+    return __builtin_bswap64(in);
+#else
+    return MEM_swap64_fallback(in);
 #endif
 }
 
```
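A worked check of the shift/mask fallback (test values mine): each byte is masked into the mirrored position, so 0x11223344 becomes 0x44332211.

```c
#include <assert.h>
#include <stdint.h>

/* Same arithmetic as MEM_swap32_fallback, on standard types. */
static uint32_t swap32(uint32_t in)
{
    return ((in << 24) & 0xff000000) |   /* byte 0 -> byte 3 */
           ((in <<  8) & 0x00ff0000) |   /* byte 1 -> byte 2 */
           ((in >>  8) & 0x0000ff00) |   /* byte 2 -> byte 1 */
           ((in >> 24) & 0x000000ff);    /* byte 3 -> byte 0 */
}

int main(void)
{
    assert(swap32(0x11223344u) == 0x44332211u);
    return 0;
}
```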
data/ext/zstdruby/libzstd/common/pool.c:

```diff
--- a/data/ext/zstdruby/libzstd/common/pool.c
+++ b/data/ext/zstdruby/libzstd/common/pool.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
```
```diff
@@ -10,9 +10,9 @@
 
 
 /* ======   Dependencies   ======= */
+#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
 #include "zstd_deps.h"  /* size_t */
 #include "debug.h"      /* assert */
-#include "zstd_internal.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "pool.h"
 
 /* ======   Compiler specifics   ====== */
```
```diff
@@ -86,7 +86,7 @@ static void* POOL_thread(void* opaque) {
         {   POOL_job const job = ctx->queue[ctx->queueHead];
             ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
             ctx->numThreadsBusy++;
-            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
+            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
             /* Unlock the mutex, signal a pusher, and run the job */
             ZSTD_pthread_cond_signal(&ctx->queuePushCond);
             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
```
```diff
@@ -96,15 +96,14 @@ static void* POOL_thread(void* opaque) {
             /* If the intended queue size was 0, signal after finishing job */
             ZSTD_pthread_mutex_lock(&ctx->queueMutex);
             ctx->numThreadsBusy--;
-            if (ctx->queueSize == 1) {
-                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-            }
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
         }
     }  /* for (;;) */
     assert(0);  /* Unreachable */
 }
 
+/* ZSTD_createThreadPool() : public access point */
 POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
     return POOL_create (numThreads, 0);
 }
```
```diff
@@ -114,7 +113,8 @@ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
 }
 
 POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem) {
+                               ZSTD_customMem customMem)
+{
     POOL_ctx* ctx;
     /* Check parameters */
     if (!numThreads) { return NULL; }
```
```diff
@@ -126,7 +126,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
      * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
-    ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
+    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
     ctx->queueHead = 0;
     ctx->queueTail = 0;
     ctx->numThreadsBusy = 0;
```
```diff
@@ -140,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     }
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
-    ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
     ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
```
```diff
@@ -173,7 +173,7 @@ static void POOL_join(POOL_ctx* ctx) {
     /* Join all of the threads */
     {   size_t i;
         for (i = 0; i < ctx->threadCapacity; ++i) {
-            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
+            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
         }   }
 }
 
```
```diff
@@ -188,11 +188,22 @@ void POOL_free(POOL_ctx *ctx) {
     ZSTD_customFree(ctx, ctx->customMem);
 }
 
+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx) {
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
+        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+    }
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
 void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
     POOL_free (pool);
 }
 
-size_t POOL_sizeof(POOL_ctx* ctx) {
+size_t POOL_sizeof(const POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
```
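POOL_joinJobs() lets a caller drain the pool without destroying it: it waits on queuePushCond until the queue is empty and no worker is busy. A usage sketch (assumes a ZSTD_MULTITHREAD build; `work` and the wrapper are placeholders, not code from the gem):

```c
#include "pool.h"

static void work(void* opaque) { (void)opaque; /* ... one unit of work ... */ }

static void run_two_batches(void)
{
    POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queue slots */);
    if (pool == NULL) return;
    POOL_add(pool, work, NULL);
    POOL_add(pool, work, NULL);
    POOL_joinJobs(pool);          /* block until both jobs have finished */
    POOL_add(pool, work, NULL);   /* the pool remains usable afterwards */
    POOL_free(pool);
}
```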
```diff
@@ -209,7 +220,7 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
         return 0;
     }
     /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
         if (!threadPool) return 1;
         /* replace existing thread pool */
         ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
```
```diff
@@ -257,9 +268,12 @@ static int isQueueFull(POOL_ctx const* ctx) {
 }
 
 
-static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+static void
+POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
 {
-    POOL_job const job = {function, opaque};
+    POOL_job job;
+    job.function = function;
+    job.opaque = opaque;
     assert(ctx != NULL);
     if (ctx->shutdown) return;
 
```
```diff
@@ -313,7 +327,9 @@ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+POOL_ctx*
+POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
+{
     (void)numThreads;
     (void)queueSize;
     (void)customMem;
```
```diff
@@ -325,6 +341,11 @@ void POOL_free(POOL_ctx* ctx) {
     (void)ctx;
 }
 
+void POOL_joinJobs(POOL_ctx* ctx){
+    assert(!ctx || ctx == &g_poolCtx);
+    (void)ctx;
+}
+
 int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
     (void)ctx; (void)numThreads;
     return 0;
```
```diff
@@ -341,7 +362,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
     return 1;
 }
 
-size_t POOL_sizeof(POOL_ctx* ctx) {
+size_t POOL_sizeof(const POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     assert(ctx == &g_poolCtx);
     return sizeof(*ctx);
```
data/ext/zstdruby/libzstd/common/pool.h:

```diff
--- a/data/ext/zstdruby/libzstd/common/pool.h
+++ b/data/ext/zstdruby/libzstd/common/pool.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
```
```diff
@@ -38,6 +38,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
  */
 void POOL_free(POOL_ctx* ctx);
 
+
+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx);
+
 /*! POOL_resize() :
  *  Expands or shrinks pool's number of threads.
  *  This is more efficient than releasing + creating a new context,
```
```diff
@@ -53,7 +59,7 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads);
  * @return threadpool memory usage
  *  note : compatible with NULL (returns 0 in this case)
  */
-size_t POOL_sizeof(POOL_ctx* ctx);
+size_t POOL_sizeof(const POOL_ctx* ctx);
 
 /*! POOL_function :
  *  The function type that can be added to a thread pool.
```
```diff
@@ -70,7 +76,7 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
 /*! POOL_tryAdd() :
- *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
+ *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
  *  Returns immediately even if not (does not block).
  * @return : 1 if successful, 0 if not.
  */
```