zstd-ruby 1.5.5.0 → 1.5.6.0
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +2 -2
- data/ext/zstdruby/extconf.rb +2 -0
- data/ext/zstdruby/libzstd/common/allocations.h +1 -1
- data/ext/zstdruby/libzstd/common/bitstream.h +49 -29
- data/ext/zstdruby/libzstd/common/compiler.h +114 -22
- data/ext/zstdruby/libzstd/common/cpu.h +36 -0
- data/ext/zstdruby/libzstd/common/debug.c +6 -0
- data/ext/zstdruby/libzstd/common/debug.h +20 -11
- data/ext/zstdruby/libzstd/common/error_private.h +45 -36
- data/ext/zstdruby/libzstd/common/fse.h +3 -2
- data/ext/zstdruby/libzstd/common/fse_decompress.c +19 -17
- data/ext/zstdruby/libzstd/common/huf.h +14 -1
- data/ext/zstdruby/libzstd/common/mem.h +0 -9
- data/ext/zstdruby/libzstd/common/pool.c +1 -1
- data/ext/zstdruby/libzstd/common/pool.h +1 -1
- data/ext/zstdruby/libzstd/common/portability_macros.h +2 -0
- data/ext/zstdruby/libzstd/common/threading.c +8 -2
- data/ext/zstdruby/libzstd/common/xxhash.c +5 -11
- data/ext/zstdruby/libzstd/common/xxhash.h +2341 -1007
- data/ext/zstdruby/libzstd/common/zstd_internal.h +5 -5
- data/ext/zstdruby/libzstd/compress/fse_compress.c +8 -7
- data/ext/zstdruby/libzstd/compress/huf_compress.c +54 -25
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +282 -161
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +29 -27
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +224 -113
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +19 -13
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +17 -5
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +11 -0
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +14 -6
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +129 -87
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +103 -28
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +8 -2
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +216 -112
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +31 -7
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +94 -79
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +188 -126
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +38 -19
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +84 -32
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +231 -208
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +1 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +2 -0
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +16 -12
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +2 -8
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +12 -6
- data/ext/zstdruby/libzstd/zstd.h +129 -60
- data/ext/zstdruby/streaming_compress.c +23 -3
- data/ext/zstdruby/streaming_decompress.c +23 -3
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +2 -2

--- a/data/ext/zstdruby/libzstd/compress/zstd_opt.h
+++ b/data/ext/zstdruby/libzstd/compress/zstd_opt.h
@@ -17,30 +17,40 @@ extern "C" {
 
 #include "zstd_compress_internal.h"
 
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
 /* used in ZSTD_loadDictionaryContent() */
 void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+#endif
 
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra(
+size_t ZSTD_compressBlock_btopt_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra2(
+size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
 
+#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_BTOPT NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL
+#endif
 
-size_t ZSTD_compressBlock_btopt_dictMatchState(
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btultra(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btopt_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
@@ -48,6 +58,20 @@ size_t ZSTD_compressBlock_btultra_extDict(
         /* note : no btultra2 variant for extDict nor dictMatchState,
          * because btultra2 is not meant to work with dictionaries
          * and is only specific for the first block (no prefix) */
+size_t ZSTD_compressBlock_btultra2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2
+#else
+#define ZSTD_COMPRESSBLOCK_BTULTRA NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
+#endif
 
 #if defined (__cplusplus)
 }
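
The new ZSTD_EXCLUDE_BTLAZY2/BTOPT/BTULTRA_BLOCK_COMPRESSOR build macros let libzstd 1.5.6 compile individual block compressors out, and the ZSTD_COMPRESSBLOCK_* aliases above resolve to NULL when the corresponding compressor is excluded. Below is a minimal sketch of how such NULL-able aliases can feed a function-pointer table; the table and selector are illustrative only (not zstd's actual dispatch code) and the sketch assumes zstd_compress_internal.h and zstd_opt.h are in scope:

    /* Hypothetical selector built on the ZSTD_COMPRESSBLOCK_* aliases. */
    typedef size_t (*blockCompressor_f)(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                        U32 rep[ZSTD_REP_NUM],
                                        void const* src, size_t srcSize);

    static const blockCompressor_f kBtCompressors[3] = {
        ZSTD_COMPRESSBLOCK_BTOPT,     /* NULL when ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR is defined */
        ZSTD_COMPRESSBLOCK_BTULTRA,   /* NULL when ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR is defined */
        ZSTD_COMPRESSBLOCK_BTULTRA2,  /* idem */
    };

    static size_t compressWithBtLevel(unsigned idx, ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                      U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize)
    {
        blockCompressor_f const f = kBtCompressors[idx];
        if (f == NULL) return (size_t)(-1);  /* illustrative error handling only */
        return f(ms, seqStore, rep, src, srcSize);
    }

The benefit is that call sites need a single NULL check instead of repeating the #ifdef guards.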

--- a/data/ext/zstdruby/libzstd/compress/zstdmt_compress.c
+++ b/data/ext/zstdruby/libzstd/compress/zstdmt_compress.c
@@ -15,17 +15,13 @@
 #endif
 
 
-/* ====== Constants ====== */
-#define ZSTDMT_OVERLAPLOG_DEFAULT 0
-
-
 /* ====== Dependencies ====== */
-#include "../common/allocations.h"
+#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
 #include "../common/zstd_deps.h"    /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
 #include "../common/mem.h"          /* MEM_STATIC */
 #include "../common/pool.h"         /* threadpool */
 #include "../common/threading.h"    /* mutex */
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
 #include "zstd_ldm.h"
 #include "zstdmt_compress.h"
 
@@ -44,12 +40,13 @@
 #  include <unistd.h>
 #  include <sys/times.h>
 
-#  define DEBUG_PRINTHEX(l,p,n) {                                      \
-    unsigned debug_u;                                                  \
-    for (debug_u=0; debug_u<(n); debug_u++)                            \
-        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]);      \
-    RAWLOG(l, " \n");                                                  \
-}
+#  define DEBUG_PRINTHEX(l,p,n)                                        \
+    do {                                                               \
+        unsigned debug_u;                                              \
+        for (debug_u=0; debug_u<(n); debug_u++)                        \
+            RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]);  \
+        RAWLOG(l, " \n");                                              \
+    } while (0)
 
 static unsigned long long GetCurrentClockTimeMicroseconds(void)
 {
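
The DEBUG_PRINTHEX change (and the ZSTD_PTHREAD_MUTEX_LOCK and JOB_ERROR changes further down) replace bare-block macro bodies with do { ... } while (0). The difference matters when the macro is used like a statement inside an unbraced if/else. A self-contained illustration with made-up macro names, not taken from zstd:

    #include <stdio.h>

    /* Block-style macro: `BAD(x);` expands to `{ ... };`, and the extra `;`
     * makes a following `else` a syntax error. */
    #define BAD(x)  { printf("%d\n", (x)); }

    /* do/while(0) form behaves like one statement, so `GOOD(x);` is legal
     * anywhere a statement is, including between `if` and `else`. */
    #define GOOD(x) do { printf("%d\n", (x)); } while (0)

    int main(void)
    {
        int v = 1;
        if (v)
            GOOD(v);    /* fine */
        else
            GOOD(-v);

        /* With BAD() in place of GOOD(), the if/else above would not compile:
         * the `{ ... };` expansion terminates the `if` before the `else`. */
        return 0;
    }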
@@ -61,25 +58,28 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
 } }
 
 #define MUTEX_WAIT_TIME_DLEVEL 6
-#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {                                  \
-    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {                           \
-        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
-        ZSTD_pthread_mutex_lock(mutex);                                   \
-        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
-            unsigned long long const elapsedTime = (afterTime-beforeTime); \
-            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
-                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
-                    elapsedTime, #mutex);                                 \
-        }   }                                                             \
-    } else {                                                              \
-        ZSTD_pthread_mutex_lock(mutex);                                   \
-    }                                                                     \
-}
+#define ZSTD_PTHREAD_MUTEX_LOCK(mutex)                                    \
+    do {                                                                  \
+        if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {                       \
+            unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
+            ZSTD_pthread_mutex_lock(mutex);                               \
+            {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
+                unsigned long long const elapsedTime = (afterTime-beforeTime); \
+                if (elapsedTime > 1000) {                                 \
+                    /* or whatever threshold you like; I'm using 1 millisecond here */ \
+                    DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL,                      \
+                        "Thread took %llu microseconds to acquire mutex %s \n", \
+                        elapsedTime, #mutex);                             \
+            }   }                                                         \
+        } else {                                                          \
+            ZSTD_pthread_mutex_lock(mutex);                               \
+        }                                                                 \
+    } while (0)
 
 #else
 
 #  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
-#  define DEBUG_PRINTHEX(l,p,n) {}
+#  define DEBUG_PRINTHEX(l,p,n) do { } while (0)
 
 #endif
 
@@ -100,18 +100,39 @@ typedef struct ZSTDMT_bufferPool_s {
     unsigned totalBuffers;
     unsigned nbBuffers;
     ZSTD_customMem cMem;
-    buffer_t bTable[1];   /* variable size */
+    buffer_t* buffers;
 } ZSTDMT_bufferPool;
 
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
+    if (!bufPool) return;   /* compatibility with free on NULL */
+    if (bufPool->buffers) {
+        unsigned u;
+        for (u=0; u<bufPool->totalBuffers; u++) {
+            DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start);
+            ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem);
+        }
+        ZSTD_customFree(bufPool->buffers, bufPool->cMem);
+    }
+    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
+    ZSTD_customFree(bufPool, bufPool->cMem);
+}
+
 static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
 {
-    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
-        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
+    ZSTDMT_bufferPool* const bufPool =
+        (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem);
     if (bufPool==NULL) return NULL;
     if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_customFree(bufPool, cMem);
        return NULL;
     }
+    bufPool->buffers = (buffer_t*)ZSTD_customCalloc(maxNbBuffers * sizeof(buffer_t), cMem);
+    if (bufPool->buffers==NULL) {
+        ZSTDMT_freeBufferPool(bufPool);
+        return NULL;
+    }
     bufPool->bufferSize = 64 KB;
     bufPool->totalBuffers = maxNbBuffers;
     bufPool->nbBuffers = 0;
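
In 1.5.5 the buffer table was a flexible-array-style member (buffer_t bTable[1]) over-allocated together with the pool header; 1.5.6 replaces it with a separately allocated buffers array, so sizeof(ZSTDMT_bufferPool) stays fixed and the free path can release the array on its own. A generic before/after sketch of the two allocation patterns (names invented for illustration, not zstd's):

    #include <stdlib.h>

    typedef struct { void* start; size_t capacity; } slot_t;

    /* Old pattern: struct and table in one allocation, sized with n-1 extra slots. */
    typedef struct { unsigned total; slot_t table[1]; } pool_v1;

    pool_v1* pool_v1_create(unsigned n)
    {
        pool_v1* p = calloc(1, sizeof(pool_v1) + (n - 1) * sizeof(slot_t));
        if (p) p->total = n;
        return p;
    }

    /* New pattern: fixed-size struct plus a separately allocated array. */
    typedef struct { unsigned total; slot_t* slots; } pool_v2;

    pool_v2* pool_v2_create(unsigned n)
    {
        pool_v2* p = calloc(1, sizeof(pool_v2));
        if (p == NULL) return NULL;
        p->slots = calloc(n, sizeof(slot_t));
        if (p->slots == NULL) { free(p); return NULL; }   /* clean up partial init */
        p->total = n;
        return p;
    }

    void pool_v2_free(pool_v2* p)
    {
        if (p == NULL) return;
        free(p->slots);   /* array released independently of the struct */
        free(p);
    }

The same conversion is applied to the CCtx pool (cctx[1] becomes ZSTD_CCtx** cctxs) in the hunks below.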
@@ -119,32 +140,19 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_cu
     return bufPool;
 }
 
-static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
-{
-    unsigned u;
-    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
-    if (!bufPool) return;   /* compatibility with free on NULL */
-    for (u=0; u<bufPool->totalBuffers; u++) {
-        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
-        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
-    }
-    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
-    ZSTD_customFree(bufPool, bufPool->cMem);
-}
-
 /* only works at initialization, not during compression */
 static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
 {
-    size_t const poolSize = sizeof(*bufPool)
-                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
+    size_t const poolSize = sizeof(*bufPool);
+    size_t const arraySize = bufPool->totalBuffers * sizeof(buffer_t);
     unsigned u;
     size_t totalBufferSize = 0;
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     for (u=0; u<bufPool->totalBuffers; u++)
-        totalBufferSize += bufPool->bTable[u].capacity;
+        totalBufferSize += bufPool->buffers[u].capacity;
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
 
-    return poolSize + totalBufferSize;
+    return poolSize + arraySize + totalBufferSize;
 }
 
 /* ZSTDMT_setBufferSize() :
@@ -187,9 +195,9 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
     DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     if (bufPool->nbBuffers) {   /* try to use an existing buffer */
-        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
+        buffer_t const buf = bufPool->buffers[--(bufPool->nbBuffers)];
         size_t const availBufferSize = buf.capacity;
-        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
+        bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer;
         if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
             /* large enough, but not too much */
             DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
@@ -250,14 +258,14 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     if (buf.start == NULL) return;   /* compatible with release on NULL */
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     if (bufPool->nbBuffers < bufPool->totalBuffers) {
-        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
+        bufPool->buffers[bufPool->nbBuffers++] = buf;   /* stored for later use */
         DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                     (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
         ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
         return;
     }
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-    /* Reached bufferPool capacity (should not happen) */
+    /* Reached bufferPool capacity (note: should not happen) */
     DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
     ZSTD_customFree(buf.start, bufPool->cMem);
 }
@@ -350,16 +358,20 @@ typedef struct {
     int totalCCtx;
     int availCCtx;
     ZSTD_customMem cMem;
-    ZSTD_CCtx* cctx[1];   /* variable size */
+    ZSTD_CCtx** cctxs;
 } ZSTDMT_CCtxPool;
 
-/* note : all CCtx borrowed from the pool
+/* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */
 static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 {
-    int cid;
-    for (cid=0; cid<pool->totalCCtx; cid++)
-        ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */
+    if (!pool) return;
     ZSTD_pthread_mutex_destroy(&pool->poolMutex);
+    if (pool->cctxs) {
+        int cid;
+        for (cid=0; cid<pool->totalCCtx; cid++)
+            ZSTD_freeCCtx(pool->cctxs[cid]);  /* free compatible with NULL */
+        ZSTD_customFree(pool->cctxs, pool->cMem);
+    }
     ZSTD_customFree(pool, pool->cMem);
 }
 
@@ -368,19 +380,24 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                               ZSTD_customMem cMem)
 {
-    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
-        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
+    ZSTDMT_CCtxPool* const cctxPool =
+        (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem);
     assert(nbWorkers > 0);
     if (!cctxPool) return NULL;
     if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
         ZSTD_customFree(cctxPool, cMem);
         return NULL;
     }
-    cctxPool->cMem = cMem;
     cctxPool->totalCCtx = nbWorkers;
+    cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem);
+    if (!cctxPool->cctxs) {
+        ZSTDMT_freeCCtxPool(cctxPool);
+        return NULL;
+    }
+    cctxPool->cMem = cMem;
+    cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem);
+    if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
     cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
-    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
-    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
     DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
     return cctxPool;
 }
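
Worth noting in the new error paths: when the cctxs array or the first CCtx cannot be allocated, the code simply calls ZSTDMT_freeCCtxPool() on the partially built pool, which only works because the destructor tolerates NULL members. A small generic sketch of that "one NULL-safe destructor for every error path" idiom (names invented for illustration, not zstd's):

    #include <stdlib.h>

    typedef struct {
        int*  ids;     /* may be NULL if construction failed part-way */
        char* name;    /* may be NULL as well */
    } worker_pool;

    /* Destructor is safe on NULL and on partially initialized objects,
     * so every error path in the constructor can simply call it. */
    void worker_pool_free(worker_pool* p)
    {
        if (p == NULL) return;
        free(p->ids);    /* free(NULL) is a no-op */
        free(p->name);
        free(p);
    }

    worker_pool* worker_pool_create(unsigned n)
    {
        worker_pool* p = calloc(1, sizeof(*p));   /* calloc => members start NULL */
        if (p == NULL) return NULL;
        p->ids = calloc(n, sizeof(int));
        if (p->ids == NULL) { worker_pool_free(p); return NULL; }
        p->name = malloc(8);
        if (p->name == NULL) { worker_pool_free(p); return NULL; }
        return p;
    }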
@@ -402,16 +419,16 @@ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
 {
     ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
     {   unsigned const nbWorkers = cctxPool->totalCCtx;
-        size_t const poolSize = sizeof(*cctxPool)
-                              + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
-        unsigned u;
+        size_t const poolSize = sizeof(*cctxPool);
+        size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*);
         size_t totalCCtxSize = 0;
+        unsigned u;
         for (u=0; u<nbWorkers; u++) {
-            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
+            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]);
         }
         ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
         assert(nbWorkers > 0);
-        return poolSize + totalCCtxSize;
+        return poolSize + arraySize + totalCCtxSize;
     }
 }
 
@@ -421,7 +438,7 @@ static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
     ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
     if (cctxPool->availCCtx) {
         cctxPool->availCCtx--;
-        { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
+        { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx];
           ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
           return cctx;
     }   }
@@ -435,7 +452,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
     if (cctx==NULL) return;   /* compatibility with release on NULL */
     ZSTD_pthread_mutex_lock(&pool->poolMutex);
     if (pool->availCCtx < pool->totalCCtx)
-        pool->cctx[pool->availCCtx++] = cctx;
+        pool->cctxs[pool->availCCtx++] = cctx;
     else {
         /* pool overflow : should not happen, since totalCCtx==nbWorkers */
         DEBUGLOG(4, "CCtx pool overflow : free cctx");
@@ -601,11 +618,8 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
     ZSTD_pthread_mutex_unlock(&serialState->mutex);
 
     if (seqStore.size > 0) {
-        size_t const err = ZSTD_referenceExternalSequences(
-            jobCCtx, seqStore.seq, seqStore.size);
+        ZSTD_referenceExternalSequences(jobCCtx, seqStore.seq, seqStore.size);
         assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
-        assert(!ZSTD_isError(err));
-        (void)err;
     }
 }
 
@@ -657,12 +671,13 @@ typedef struct {
     unsigned frameChecksumNeeded;        /* used only by mtctx */
 } ZSTDMT_jobDescription;
 
-#define JOB_ERROR(e) {                              \
-    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);       \
-    job->cSize = e;                                 \
-    ZSTD_pthread_mutex_unlock(&job->job_mutex);     \
-    goto _endJob;                                   \
-}
+#define JOB_ERROR(e)                                \
+    do {                                            \
+        ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
+        job->cSize = e;                             \
+        ZSTD_pthread_mutex_unlock(&job->job_mutex); \
+        goto _endJob;                               \
+    } while (0)
 
 /* ZSTDMT_compressionJob() is a POOL_function type */
 static void ZSTDMT_compressionJob(void* jobDescription)
@@ -1091,7 +1106,7 @@ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
     {   unsigned jobNb;
         unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
         DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
-                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)
+                    mtctx->doneJobID, lastJobNb, mtctx->jobReady);
         for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
             unsigned const wJobID = jobNb & mtctx->jobIDMask;
             ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
|