zstdlib 0.3.0-x86-mingw32 → 0.8.0-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES.md +30 -1
- data/README.md +2 -2
- data/Rakefile +1 -1
- data/ext/zstdlib/extconf.rb +3 -3
- data/ext/zstdlib/ruby/zlib-2.7/zstdlib.c +4895 -0
- data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/bitstream.h +59 -51
- data/ext/zstdlib/zstd-1.5.0/lib/common/compiler.h +289 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/cpu.h +1 -3
- data/ext/zstdlib/zstd-1.5.0/lib/common/debug.c +24 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/debug.h +22 -49
- data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/error_private.c +3 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/error_private.h +8 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/fse.h +50 -42
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/fse_decompress.c +149 -55
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/huf.h +43 -39
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/mem.h +69 -25
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/pool.c +30 -20
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/pool.h +3 -3
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/threading.c +51 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/threading.h +36 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/xxhash.c +40 -92
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/xxhash.h +12 -32
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_internal.h +490 -0
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/fse_compress.c +47 -63
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/hist.c +41 -63
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/hist.h +13 -33
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/huf_compress.c +332 -193
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress.c +6393 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +522 -86
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +25 -16
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +2 -2
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +50 -24
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +11 -4
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress_superblock.c +572 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress_superblock.h +32 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_cwksp.h +662 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +43 -41
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +2 -2
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_fast.c +85 -80
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_fast.h +2 -2
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_ldm.c +333 -208
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_ldm.h +15 -3
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_opt.c +228 -129
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +151 -440
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/huf_decompress.c +395 -276
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +20 -16
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +3 -3
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +628 -231
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +606 -380
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +8 -5
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +39 -9
- data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/zstd.h +740 -153
- data/ext/zstdlib/{zstd-1.4.2/lib/common → zstd-1.5.0/lib}/zstd_errors.h +3 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzclose.c +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzlib.c +9 -9
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzread.c +16 -8
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzwrite.c +8 -8
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +131 -45
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
- metadata +76 -67
- data/ext/zstdlib/zstd-1.4.2/lib/common/compiler.h +0 -147
- data/ext/zstdlib/zstd-1.4.2/lib/common/debug.c +0 -44
- data/ext/zstdlib/zstd-1.4.2/lib/common/entropy_common.c +0 -236
- data/ext/zstdlib/zstd-1.4.2/lib/common/zstd_internal.h +0 -371
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress.c +0 -3904
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.c +0 -1111
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.h +0 -67
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstdmt_compress.h +0 -192
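For orientation before the per-file diffs below: this release bundles zstd 1.5.0 (including its zlibWrapper) and ships prebuilt zstdlib.so binaries for Ruby 2.2 through 2.7, so the gem continues to expose a zlib-style interface. A minimal round-trip sketch, assuming the zlib-style one-shot methods Zstdlib.deflate and Zstdlib.inflate mirror Ruby's Zlib module (not shown in this diff; the README content is not included here):

    require 'zstdlib'

    data       = "example payload " * 512
    compressed = Zstdlib.deflate(data)    # zstd-backed, zlib-style one-shot compression
    restored   = Zstdlib.inflate(compressed)
    raise 'round-trip mismatch' unless restored == data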

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -15,10 +15,10 @@

 size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE*
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

-    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
+    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");

     switch(flSize)
     {
@@ -35,13 +35,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
         assert(0);
     }

-
+    ZSTD_memcpy(ostart + flSize, src, srcSize);
+    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
     return srcSize + flSize;
 }

 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE*
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

     (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
@@ -62,6 +63,7 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
     }

     ostart[flSize] = *(const BYTE*)src;
+    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
     return flSize+1;
 }

@@ -70,7 +72,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
-                              void*
+                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                               const int bmi2)
 {
     size_t const minGain = ZSTD_minGain(srcSize, strategy);
@@ -80,11 +82,11 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     symbolEncodingType_e hType = set_compressed;
     size_t cLitSize;

-    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
-             disableLiteralCompression);
+    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
+             disableLiteralCompression, (U32)srcSize);

     /* Prepare nextEntropy assuming reusing the existing table */
-
+    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

     if (disableLiteralCompression)
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
@@ -99,22 +101,28 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     {   HUF_repeat repeat = prevHuf->repeatMode;
         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-        cLitSize = singleStream ?
-
-
-
+        cLitSize = singleStream ?
+            HUF_compress1X_repeat(
+                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
+            HUF_compress4X_repeat(
+                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
         if (repeat != HUF_repeat_none) {
             /* reused the existing table */
+            DEBUGLOG(5, "Reusing previous huffman table");
             hType = set_repeat;
         }
     }

-    if ((cLitSize==0)
-
+    if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     }
     if (cLitSize==1) {
-
+        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
     }

@@ -145,5 +153,6 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     default:  /* not possible : lhSize is {3,4,5} */
         assert(0);
     }
+    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
     return lhSize+cLitSize;
 }

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -23,7 +23,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
-                              void*
+                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                               const int bmi2);

 #endif /* ZSTD_COMPRESS_LITERALS_H */

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -50,6 +50,19 @@ static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
     return maxSymbolValue;
 }

+/**
+ * Returns true if we should use ncount=-1 else we should
+ * use ncount=1 for low probability symbols instead.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+    /* Heuristic: This should cover most blocks <= 16K and
+     * start to fade out after 16K to about 32K depending on
+     * comprssibility.
+     */
+    return nbSeq >= 2048;
+}
+
 /**
  * Returns the cost in bytes of encoding the normalized count header.
  * Returns an error if any of the helper functions return an error.
@@ -60,7 +73,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
     BYTE wksp[FSE_NCOUNTBOUND];
     S16 norm[MaxSeq + 1];
     const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
     return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
 }

@@ -72,6 +85,8 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
 {
     unsigned cost = 0;
     unsigned s;
+
+    assert(total > 0);
     for (s = 0; s <= max; ++s) {
         unsigned norm = (unsigned)((256 * count[s]) / total);
         if (count[s] != 0 && norm == 0)
@@ -86,7 +101,7 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
  * Returns the cost in bits of encoding the distribution in count using ctable.
  * Returns an error if ctable cannot represent all the symbols in count.
  */
-
+size_t ZSTD_fseBitCost(
     FSE_CTable const* ctable,
     unsigned const* count,
     unsigned const max)
@@ -96,18 +111,22 @@ static size_t ZSTD_fseBitCost(
     unsigned s;
     FSE_CState_t cstate;
     FSE_initCState(&cstate, ctable);
-
-
+    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
                 ZSTD_getFSEMaxSymbolValue(ctable), max);
+        return ERROR(GENERIC);
+    }
     for (s = 0; s <= max; ++s) {
         unsigned const tableLog = cstate.stateLog;
         unsigned const badCost = (tableLog + 1) << kAccuracyLog;
         unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
         if (count[s] == 0)
             continue;
-
-
-
+        if (bitCost >= badCost) {
+            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+            return ERROR(GENERIC);
+        }
+        cost += (size_t)count[s] * bitCost;
     }
     return cost >> kAccuracyLog;
 }
@@ -117,15 +136,15 @@ static size_t ZSTD_fseBitCost(
  * table described by norm. The max symbol support by norm is assumed >= max.
  * norm must be valid for every symbol with non-zero probability in count.
  */
-
-
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                             unsigned const* count, unsigned const max)
 {
     unsigned const shift = 8 - accuracyLog;
     size_t cost = 0;
     unsigned s;
     assert(accuracyLog <= 8);
     for (s = 0; s <= max; ++s) {
-        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
         unsigned const norm256 = normAcc << shift;
         assert(norm256 > 0);
         assert(norm256 < 256);
@@ -215,6 +234,11 @@ ZSTD_selectEncodingType(
     return set_compressed;
 }

+typedef struct {
+    S16 norm[MaxSeq + 1];
+    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
 size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
                  FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
@@ -222,7 +246,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
                  const BYTE* codeTable, size_t nbSeq,
                  const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                  const FSE_CTable* prevCTable, size_t prevCTableSize,
-                 void*
+                 void* entropyWorkspace, size_t entropyWorkspaceSize)
 {
     BYTE* op = (BYTE*)dst;
     const BYTE* const oend = op + dstCapacity;
@@ -230,18 +254,18 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,

     switch (type) {
     case set_rle:
-        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
-        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
+        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
+        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
         *op = codeTable[0];
         return 1;
     case set_repeat:
-
+        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
         return 0;
     case set_basic:
-        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog,
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
         return 0;
     case set_compressed: {
-
+        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
         size_t nbSeq_1 = nbSeq;
         const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
         if (count[codeTable[nbSeq-1]] > 1) {
@@ -249,14 +273,16 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
             nbSeq_1--;
         }
         assert(nbSeq_1 > 1);
-
-
-
-
+        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+        (void)entropyWorkspaceSize;
+        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
+        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
+            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
             return NCountSize;
         }
     }
-    default: assert(0); RETURN_ERROR(GENERIC);
+    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
     }
 }

@@ -290,7 +316,7 @@ ZSTD_encodeSequences_body(
     if (MEM_32bits()) BIT_flushBits(&blockStream);
     if (longOffsets) {
         U32 const ofBits = ofCodeTable[nbSeq-1];
-
+        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
         if (extraBits) {
             BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
             BIT_flushBits(&blockStream);
@@ -327,7 +353,7 @@ ZSTD_encodeSequences_body(
             BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
             if (longOffsets) {
-
+                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                 if (extraBits) {
                     BIT_addBits(&blockStream, sequences[n].offset, extraBits);
                     BIT_flushBits(&blockStream);   /* (7)*/

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,8 @@
 #ifndef ZSTD_COMPRESS_SEQUENCES_H
 #define ZSTD_COMPRESS_SEQUENCES_H

-#include "fse.h" /* FSE_repeat, FSE_CTable */
-#include "zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
+#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */

 typedef enum {
     ZSTD_defaultDisallowed = 0,
@@ -35,7 +35,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
                  const BYTE* codeTable, size_t nbSeq,
                  const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                  const FSE_CTable* prevCTable, size_t prevCTableSize,
-                 void*
+                 void* entropyWorkspace, size_t entropyWorkspaceSize);

 size_t ZSTD_encodeSequences(
     void* dst, size_t dstCapacity,
@@ -44,4 +44,11 @@ size_t ZSTD_encodeSequences(
     FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
     seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);

+size_t ZSTD_fseBitCost(
+    FSE_CTable const* ctable,
+    unsigned const* count,
+    unsigned const max);
+
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                             unsigned const* count, unsigned const max);
 #endif /* ZSTD_COMPRESS_SEQUENCES_H */

data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress_superblock.c

@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "zstd_compress_superblock.h"
+
+#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
+#include "hist.h"                     /* HIST_countFast_wksp */
+#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+
+/** ZSTD_compressSubBlock_literal() :
+ *  Compresses literals section for a sub-block.
+ *  When we have to write the Huffman table we will sometimes choose a header
+ *  size larger than necessary. This is because we have to pick the header size
+ *  before we know the table size + compressed size, so we have a bound on the
+ *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
+ *
+ *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
+ *  in writing the header, otherwise it is set to 0.
+ *
+ *  hufMetadata->hType has literals block type info.
+ *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
+ *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
+ *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
+ *      If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
+ *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
+ *  @return : compressed size of literals section of a sub-block
+ *            Or 0 if it unable to compress.
+ *            Or error code */
+static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
+                                    const BYTE* literals, size_t litSize,
+                                    void* dst, size_t dstSize,
+                                    const int bmi2, int writeEntropy, int* entropyWritten)
+{
+    size_t const header = writeEntropy ? 200 : 0;
+    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart + lhSize;
+    U32 const singleStream = lhSize == 3;
+    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+    size_t cLitSize = 0;
+
+    (void)bmi2; /* TODO bmi2... */
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
+
+    *entropyWritten = 0;
+    if (litSize == 0 || hufMetadata->hType == set_basic) {
+      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
+      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+    } else if (hufMetadata->hType == set_rle) {
+      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
+      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
+    }
+
+    assert(litSize > 0);
+    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
+
+    if (writeEntropy && hufMetadata->hType == set_compressed) {
+        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+        op += hufMetadata->hufDesSize;
+        cLitSize += hufMetadata->hufDesSize;
+        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
+    }
+
+    /* TODO bmi2 */
+    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
+                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+        op += cSize;
+        cLitSize += cSize;
+        if (cSize == 0 || ERR_isError(cSize)) {
+            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
+            return 0;
+        }
+        /* If we expand and we aren't writing a header then emit uncompressed */
+        if (!writeEntropy && cLitSize >= litSize) {
+            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
+            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+        }
+        /* If we are writing headers then allow expansion that doesn't change our header size. */
+        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
+            assert(cLitSize > litSize);
+            DEBUGLOG(5, "Literals expanded beyond allowed header size");
+            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+        }
+        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
+    }
+
+    /* Build header */
+    switch(lhSize)
+    {
+    case 3: /* 2 - 2 - 10 - 10 */
+        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+            MEM_writeLE24(ostart, lhc);
+            break;
+        }
+    case 4: /* 2 - 2 - 14 - 14 */
+        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
+            MEM_writeLE32(ostart, lhc);
+            break;
+        }
+    case 5: /* 2 - 2 - 18 - 18 */
+        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
+            MEM_writeLE32(ostart, lhc);
+            ostart[4] = (BYTE)(cLitSize >> 10);
+            break;
+        }
+    default: /* not possible : lhSize is {3,4,5} */
+        assert(0);
+    }
+    *entropyWritten = 1;
+    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+    return op-ostart;
+}
+
+static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+    const seqDef* const sstart = sequences;
+    const seqDef* const send = sequences + nbSeq;
+    const seqDef* sp = sstart;
+    size_t matchLengthSum = 0;
+    size_t litLengthSum = 0;
+    while (send-sp > 0) {
+        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+        litLengthSum += seqLen.litLength;
+        matchLengthSum += seqLen.matchLength;
+        sp++;
+    }
+    assert(litLengthSum <= litSize);
+    if (!lastSequence) {
+        assert(litLengthSum == litSize);
+    }
+    return matchLengthSum + litSize;
+}
+
+/** ZSTD_compressSubBlock_sequences() :
+ *  Compresses sequences section for a sub-block.
+ *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
+ *  symbol compression modes for the super-block.
+ *  The first successfully compressed block will have these in its header.
+ *  We set entropyWritten=1 when we succeed in compressing the sequences.
+ *  The following sub-blocks will always have repeat mode.
+ *  @return : compressed size of sequences section of a sub-block
+ *            Or 0 if it is unable to compress
+ *            Or error code. */
+static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
+                                              const seqDef* sequences, size_t nbSeq,
+                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+                                              const ZSTD_CCtx_params* cctxParams,
+                                              void* dst, size_t dstCapacity,
+                                              const int bmi2, int writeEntropy, int* entropyWritten)
+{
+    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    BYTE* seqHead;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
+
+    *entropyWritten = 0;
+    /* Sequences Header */
+    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+                    dstSize_tooSmall, "");
+    if (nbSeq < 0x7F)
+        *op++ = (BYTE)nbSeq;
+    else if (nbSeq < LONGNBSEQ)
+        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+    else
+        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    if (nbSeq==0) {
+        return op - ostart;
+    }
+
+    /* seqHead : flags for FSE encoding type */
+    seqHead = op++;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
+
+    if (writeEntropy) {
+        const U32 LLtype = fseMetadata->llType;
+        const U32 Offtype = fseMetadata->ofType;
+        const U32 MLtype = fseMetadata->mlType;
+        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
+        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+        op += fseMetadata->fseTablesSize;
+    } else {
+        const U32 repeat = set_repeat;
+        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
+    }
+
+    {   size_t const bitstreamSize = ZSTD_encodeSequences(
+                                        op, oend - op,
+                                        fseTables->matchlengthCTable, mlCode,
+                                        fseTables->offcodeCTable, ofCode,
+                                        fseTables->litlengthCTable, llCode,
+                                        sequences, nbSeq,
+                                        longOffsets, bmi2);
+        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+        op += bitstreamSize;
+        /* zstd versions <= 1.3.4 mistakenly report corruption when
+         * FSE_readNCount() receives a buffer < 4 bytes.
+         * Fixed by https://github.com/facebook/zstd/pull/1146.
+         * This can happen when the last set_compressed table present is 2
+         * bytes and the bitstream is only one byte.
+         * In this exceedingly rare case, we will simply emit an uncompressed
+         * block, since it isn't worth optimizing.
+         */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
+            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
+            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+                        "emitting an uncompressed block.");
+            return 0;
+        }
+#endif
+        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
+    }
+
+    /* zstd versions <= 1.4.0 mistakenly report error when
+     * sequences section body size is less than 3 bytes.
+     * Fixed by https://github.com/facebook/zstd/pull/1664.
+     * This can happen when the previous sequences section block is compressed
+     * with rle mode and the current block's sequences section is compressed
+     * with repeat mode where sequences section body size can be 1 byte.
+     */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+    if (op-seqHead < 4) {
+        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
+                    "an uncompressed block when sequences are < 4 bytes");
+        return 0;
+    }
+#endif
+
+    *entropyWritten = 1;
+    return op - ostart;
+}
+
+/** ZSTD_compressSubBlock() :
+ *  Compresses a single sub-block.
+ *  @return : compressed size of the sub-block
+ *            Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                                    const seqDef* sequences, size_t nbSeq,
+                                    const BYTE* literals, size_t litSize,
+                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+                                    const ZSTD_CCtx_params* cctxParams,
+                                    void* dst, size_t dstCapacity,
+                                    const int bmi2,
+                                    int writeLitEntropy, int writeSeqEntropy,
+                                    int* litEntropyWritten, int* seqEntropyWritten,
+                                    U32 lastBlock)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart + ZSTD_blockHeaderSize;
+    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
+                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
+    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
+                                                        &entropyMetadata->hufMetadata, literals, litSize,
+                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
+        if (cLitSize == 0) return 0;
+        op += cLitSize;
+    }
+    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
+                                                  &entropyMetadata->fseMetadata,
+                                                  sequences, nbSeq,
+                                                  llCode, mlCode, ofCode,
+                                                  cctxParams,
+                                                  op, oend-op,
+                                                  bmi2, writeSeqEntropy, seqEntropyWritten);
+        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
+        if (cSeqSize == 0) return 0;
+        op += cSeqSize;
+    }
+    /* Write block header */
+    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+        MEM_writeLE24(ostart, cBlockHeader24);
+    }
+    return op-ostart;
+}
+
+static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
+                                                const ZSTD_hufCTables_t* huf,
+                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
+                                                void* workspace, size_t wkspSize,
+                                                int writeEntropy)
+{
+    unsigned* const countWksp = (unsigned*)workspace;
+    unsigned maxSymbolValue = 255;
+    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+
+    if (hufMetadata->hType == set_basic) return litSize;
+    else if (hufMetadata->hType == set_rle) return 1;
+    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+        if (ZSTD_isError(largest)) return litSize;
+        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+            return cLitSizeEstimate + literalSectionHeaderSize;
+    }   }
+    assert(0); /* impossible */
+    return 0;
+}
+
+static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+                        const BYTE* codeTable, unsigned maxCode,
+                        size_t nbSeq, const FSE_CTable* fseCTable,
+                        const U32* additionalBits,
+                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+                        void* workspace, size_t wkspSize)
+{
+    unsigned* const countWksp = (unsigned*)workspace;
+    const BYTE* ctp = codeTable;
+    const BYTE* const ctStart = ctp;
+    const BYTE* const ctEnd = ctStart + nbSeq;
+    size_t cSymbolTypeSizeEstimateInBits = 0;
+    unsigned max = maxCode;
+
+    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
+    if (type == set_basic) {
+        /* We selected this encoding type, so it must be valid. */
+        assert(max <= defaultMax);
+        cSymbolTypeSizeEstimateInBits = max <= defaultMax
+                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+                : ERROR(GENERIC);
+    } else if (type == set_rle) {
+        cSymbolTypeSizeEstimateInBits = 0;
+    } else if (type == set_compressed || type == set_repeat) {
+        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+    }
+    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
+    while (ctp < ctEnd) {
+        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+        ctp++;
+    }
+    return cSymbolTypeSizeEstimateInBits / 8;
+}
+
+static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+                                                  const BYTE* llCodeTable,
+                                                  const BYTE* mlCodeTable,
+                                                  size_t nbSeq,
+                                                  const ZSTD_fseCTables_t* fseTables,
+                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
+                                                  void* workspace, size_t wkspSize,
+                                                  int writeEntropy)
+{
+    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+    size_t cSeqSizeEstimate = 0;
+    if (nbSeq == 0) return sequencesSectionHeaderSize;
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
+                                         nbSeq, fseTables->offcodeCTable, NULL,
+                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                                         workspace, wkspSize);
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
+                                         nbSeq, fseTables->litlengthCTable, LL_bits,
+                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                                         workspace, wkspSize);
+    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
+                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
+                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
+                                         workspace, wkspSize);
+    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+    return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+                                        const BYTE* ofCodeTable,
+                                        const BYTE* llCodeTable,
+                                        const BYTE* mlCodeTable,
+                                        size_t nbSeq,
+                                        const ZSTD_entropyCTables_t* entropy,
+                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                                        void* workspace, size_t wkspSize,
+                                        int writeLitEntropy, int writeSeqEntropy) {
+    size_t cSizeEstimate = 0;
+    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
+                                                         &entropy->huf, &entropyMetadata->hufMetadata,
+                                                         workspace, wkspSize, writeLitEntropy);
+    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+                                                         workspace, wkspSize, writeSeqEntropy);
+    return cSizeEstimate + ZSTD_blockHeaderSize;
+}
+
+static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+{
+    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
+        return 1;
+    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
+        return 1;
+    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
+        return 1;
+    return 0;
+}
+
+/** ZSTD_compressSubBlock_multi() :
+ *  Breaks super-block into multiple sub-blocks and compresses them.
+ *  Entropy will be written to the first block.
+ *  The following blocks will use repeat mode to compress.
+ *  All sub-blocks are compressed blocks (no raw or rle blocks).
+ *  @return : compressed size of the super block (which is multiple ZSTD blocks)
+ *            Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+                            const ZSTD_compressedBlockState_t* prevCBlock,
+                            ZSTD_compressedBlockState_t* nextCBlock,
+                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                            const ZSTD_CCtx_params* cctxParams,
+                            void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const int bmi2, U32 lastBlock,
+                            void* workspace, size_t wkspSize)
+{
+    const seqDef* const sstart = seqStorePtr->sequencesStart;
+    const seqDef* const send = seqStorePtr->sequences;
+    const seqDef* sp = sstart;
+    const BYTE* const lstart = seqStorePtr->litStart;
+    const BYTE* const lend = seqStorePtr->lit;
+    const BYTE* lp = lstart;
+    BYTE const* ip = (BYTE const*)src;
+    BYTE const* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    const BYTE* llCodePtr = seqStorePtr->llCode;
+    const BYTE* mlCodePtr = seqStorePtr->mlCode;
+    const BYTE* ofCodePtr = seqStorePtr->ofCode;
+    size_t targetCBlockSize = cctxParams->targetCBlockSize;
+    size_t litSize, seqCount;
+    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+    int writeSeqEntropy = 1;
+    int lastSequence = 0;
+
+    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
+                (unsigned)(lend-lp), (unsigned)(send-sstart));
+
+    litSize = 0;
+    seqCount = 0;
+    do {
+        size_t cBlockSizeEstimate = 0;
+        if (sstart == send) {
+            lastSequence = 1;
+        } else {
+            const seqDef* const sequence = sp + seqCount;
+            lastSequence = sequence == send - 1;
+            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
+            seqCount++;
+        }
+        if (lastSequence) {
+            assert(lp <= lend);
+            assert(litSize <= (size_t)(lend - lp));
+            litSize = (size_t)(lend - lp);
+        }
+        /* I think there is an optimization opportunity here.
+         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+         * since it recalculates estimate from scratch.
+         * For example, it would recount literal distribution and symbol codes everytime.
+         */
+        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
+                                                       &nextCBlock->entropy, entropyMetadata,
+                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
+        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
+            int litEntropyWritten = 0;
+            int seqEntropyWritten = 0;
+            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
+            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+                                                       sp, seqCount,
+                                                       lp, litSize,
+                                                       llCodePtr, mlCodePtr, ofCodePtr,
+                                                       cctxParams,
+                                                       op, oend-op,
+                                                       bmi2, writeLitEntropy, writeSeqEntropy,
+                                                       &litEntropyWritten, &seqEntropyWritten,
+                                                       lastBlock && lastSequence);
+            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+            if (cSize > 0 && cSize < decompressedSize) {
+                DEBUGLOG(5, "Committed the sub-block");
+                assert(ip + decompressedSize <= iend);
+                ip += decompressedSize;
+                sp += seqCount;
+                lp += litSize;
+                op += cSize;
+                llCodePtr += seqCount;
+                mlCodePtr += seqCount;
+                ofCodePtr += seqCount;
+                litSize = 0;
+                seqCount = 0;
+                /* Entropy only needs to be written once */
+                if (litEntropyWritten) {
+                    writeLitEntropy = 0;
+                }
+                if (seqEntropyWritten) {
+                    writeSeqEntropy = 0;
+                }
+            }
+        }
+    } while (!lastSequence);
+    if (writeLitEntropy) {
+        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+    }
+    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
+        /* If we haven't written our entropy tables, then we've violated our contract and
+         * must emit an uncompressed block.
+         */
+        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+        return 0;
+    }
+    if (ip < iend) {
+        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
+        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+        assert(cSize != 0);
+        op += cSize;
+        /* We have to regenerate the repcodes because we've skipped some sequences */
+        if (sp < send) {
+            seqDef const* seq;
+            repcodes_t rep;
+            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
+            for (seq = sstart; seq < sp; ++seq) {
+                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+            }
+            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
+        }
+    }
+    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
+    return op-ostart;
+}
+
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+                               void* dst, size_t dstCapacity,
+                               void const* src, size_t srcSize,
+                               unsigned lastBlock) {
+    ZSTD_entropyCTablesMetadata_t entropyMetadata;
+
+    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
+          &zc->blockState.prevCBlock->entropy,
+          &zc->blockState.nextCBlock->entropy,
+          &zc->appliedParams,
+          &entropyMetadata,
+          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+
+    return ZSTD_compressSubBlock_multi(&zc->seqStore,
+            zc->blockState.prevCBlock,
+            zc->blockState.nextCBlock,
+            &entropyMetadata,
+            &zc->appliedParams,
+            dst, dstCapacity,
+            src, srcSize,
+            zc->bmi2, lastBlock,
+            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+}