zstdlib 0.7.0-x86-mingw32 → 0.10.0-x86-mingw32

Files changed (138)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +20 -0
  3. data/README.md +7 -1
  4. data/Rakefile +38 -8
  5. data/ext/{zstdlib → zstdlib_c}/extconf.rb +11 -6
  6. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.2/zstdlib.c +2 -2
  7. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.3/zstdlib.c +2 -2
  8. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.4/zstdlib.c +2 -2
  9. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.5/zstdlib.c +2 -2
  10. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.6/zstdlib.c +2 -2
  11. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.7/zstdlib.c +2 -2
  12. data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
  13. data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
  14. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/adler32.c +0 -0
  15. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/compress.c +0 -0
  16. data/ext/zstdlib_c/zlib-1.2.12/crc32.c +1116 -0
  17. data/ext/zstdlib_c/zlib-1.2.12/crc32.h +9446 -0
  18. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/deflate.c +78 -30
  19. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/deflate.h +12 -15
  20. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzclose.c +0 -0
  21. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzguts.h +3 -2
  22. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzlib.c +5 -3
  23. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzread.c +5 -7
  24. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzwrite.c +25 -13
  25. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/infback.c +2 -1
  26. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffast.c +14 -14
  27. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffast.h +0 -0
  28. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffixed.h +0 -0
  29. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inflate.c +39 -8
  30. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inflate.h +3 -2
  31. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inftrees.c +3 -3
  32. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inftrees.h +0 -0
  33. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/trees.c +27 -48
  34. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/trees.h +0 -0
  35. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/uncompr.c +0 -0
  36. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zconf.h +0 -0
  37. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zlib.h +123 -100
  38. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zutil.c +2 -2
  39. data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zutil.h +12 -9
  40. data/ext/{zstdlib → zstdlib_c}/zlib.mk +0 -0
  41. data/ext/{zstdlib → zstdlib_c}/zlibwrapper/zlibwrapper.c +1 -5
  42. data/ext/{zstdlib → zstdlib_c}/zlibwrapper.mk +0 -0
  43. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/bitstream.h +46 -22
  44. data/ext/zstdlib_c/zstd-1.5.2/lib/common/compiler.h +335 -0
  45. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/cpu.h +1 -3
  46. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/debug.c +1 -1
  47. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/debug.h +12 -19
  48. data/ext/zstdlib_c/zstd-1.5.2/lib/common/entropy_common.c +368 -0
  49. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.c +2 -1
  50. data/ext/zstdlib_c/zstd-1.5.2/lib/common/error_private.h +159 -0
  51. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/fse.h +41 -12
  52. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/fse_decompress.c +139 -22
  53. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/huf.h +47 -23
  54. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/mem.h +87 -98
  55. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/pool.c +34 -23
  56. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/pool.h +4 -4
  57. data/ext/zstdlib_c/zstd-1.5.2/lib/common/portability_macros.h +137 -0
  58. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/threading.c +6 -5
  59. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/threading.h +0 -0
  60. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.c +24 -0
  61. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.h +5686 -0
  62. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_common.c +10 -10
  63. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_deps.h +111 -0
  64. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_internal.h +191 -145
  65. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_trace.h +163 -0
  66. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/clevels.h +134 -0
  67. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/fse_compress.c +89 -46
  68. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.c +27 -29
  69. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.h +2 -2
  70. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/huf_compress.c +1370 -0
  71. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress.c +2917 -868
  72. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_internal.h +458 -125
  73. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.c +12 -11
  74. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.h +4 -2
  75. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.c +41 -18
  76. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.h +1 -1
  77. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.c +26 -298
  78. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.h +1 -1
  79. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h +234 -83
  80. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.c +313 -138
  81. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.h +1 -1
  82. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.c +329 -150
  83. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.h +1 -1
  84. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.c +2104 -0
  85. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.h +125 -0
  86. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.c +321 -216
  87. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.h +9 -2
  88. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_ldm_geartab.h +106 -0
  89. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.c +412 -166
  90. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.h +1 -1
  91. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c +169 -453
  92. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstdmt_compress.h +113 -0
  93. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/huf_decompress.c +1044 -403
  94. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress_amd64.S +585 -0
  95. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.c +9 -9
  96. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.h +2 -2
  97. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress.c +450 -105
  98. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.c +913 -273
  99. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.h +14 -5
  100. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_internal.h +59 -12
  101. data/ext/zstdlib_c/zstd-1.5.2/lib/zdict.h +452 -0
  102. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/zstd.h +699 -214
  103. data/ext/{zstdlib/zstd-1.4.5/lib/common → zstdlib_c/zstd-1.5.2/lib}/zstd_errors.h +2 -1
  104. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzclose.c +0 -0
  105. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzcompatibility.h +1 -1
  106. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzguts.h +0 -0
  107. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzlib.c +0 -0
  108. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzread.c +0 -0
  109. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzwrite.c +0 -0
  110. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.c +133 -44
  111. data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.h +1 -1
  112. data/ext/zstdlib_c/zstd.mk +15 -0
  113. data/lib/2.4/zstdlib_c.so +0 -0
  114. data/lib/2.5/zstdlib_c.so +0 -0
  115. data/lib/2.6/zstdlib_c.so +0 -0
  116. data/lib/2.7/zstdlib_c.so +0 -0
  117. data/lib/3.0/zstdlib_c.so +0 -0
  118. data/lib/3.1/zstdlib_c.so +0 -0
  119. data/lib/zstdlib.rb +2 -2
  120. metadata +125 -116
  121. data/ext/zstdlib/zlib-1.2.11/crc32.c +0 -442
  122. data/ext/zstdlib/zlib-1.2.11/crc32.h +0 -441
  123. data/ext/zstdlib/zstd-1.4.5/lib/common/compiler.h +0 -175
  124. data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
  125. data/ext/zstdlib/zstd-1.4.5/lib/common/error_private.h +0 -80
  126. data/ext/zstdlib/zstd-1.4.5/lib/common/xxhash.c +0 -864
  127. data/ext/zstdlib/zstd-1.4.5/lib/common/xxhash.h +0 -285
  128. data/ext/zstdlib/zstd-1.4.5/lib/compress/huf_compress.c +0 -798
  129. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
  130. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
  131. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
  132. data/ext/zstdlib/zstd.mk +0 -14
  133. data/lib/2.2/zstdlib.so +0 -0
  134. data/lib/2.3/zstdlib.so +0 -0
  135. data/lib/2.4/zstdlib.so +0 -0
  136. data/lib/2.5/zstdlib.so +0 -0
  137. data/lib/2.6/zstdlib.so +0 -0
  138. data/lib/2.7/zstdlib.so +0 -0
data/ext/zstdlib/zstd-1.4.5/lib/compress/huf_compress.c (deleted)
@@ -1,798 +0,0 @@
- /* ******************************************************************
- * Huffman encoder, part of New Generation Entropy library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
- *
- * You can contact the author at :
- * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- * - Public forum : https://groups.google.com/forum/#!forum/lz4c
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- ****************************************************************** */
-
- /* **************************************************************
- * Compiler specifics
- ****************************************************************/
- #ifdef _MSC_VER /* Visual Studio */
- # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
- #endif
-
-
- /* **************************************************************
- * Includes
- ****************************************************************/
- #include <string.h> /* memcpy, memset */
- #include <stdio.h> /* printf (debug) */
- #include "../common/compiler.h"
- #include "../common/bitstream.h"
- #include "hist.h"
- #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
- #include "../common/fse.h" /* header compression */
- #define HUF_STATIC_LINKING_ONLY
- #include "../common/huf.h"
- #include "../common/error_private.h"
-
-
- /* **************************************************************
- * Error Management
- ****************************************************************/
- #define HUF_isError ERR_isError
- #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
-
-
- /* **************************************************************
- * Utils
- ****************************************************************/
- unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
- {
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
- }
-
-
- /* *******************************************************
- * HUF : Huffman block compression
- *********************************************************/
- /* HUF_compressWeights() :
- * Same as FSE_compress(), but dedicated to huff0's weights compression.
- * The use case needs much less stack memory.
- * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
- */
- #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
- static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
- {
- BYTE* const ostart = (BYTE*) dst;
- BYTE* op = ostart;
- BYTE* const oend = ostart + dstSize;
-
- unsigned maxSymbolValue = HUF_TABLELOG_MAX;
- U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-
- FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
- BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
-
- unsigned count[HUF_TABLELOG_MAX+1];
- S16 norm[HUF_TABLELOG_MAX+1];
-
- /* init conditions */
- if (wtSize <= 1) return 0; /* Not compressible */
-
- /* Scan input and build symbol stats */
- { unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize); /* never fails */
- if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
- if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
- }
-
- tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
- CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
-
- /* Write table description header */
- { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) );
- op += hSize;
- }
-
- /* Compress */
- CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
- { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) );
- if (cSize == 0) return 0; /* not enough space for compressed data */
- op += cSize;
- }
-
- return (size_t)(op-ostart);
- }
-
-
- struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
- }; /* typedef'd to HUF_CElt within "huf.h" */
-
- /*! HUF_writeCTable() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
- size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
- {
- BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
- BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
- BYTE* op = (BYTE*)dst;
- U32 n;
-
- /* check conditions */
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-
- /* convert to weight */
- bitsToWeight[0] = 0;
- for (n=1; n<huffLog+1; n++)
- bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
- for (n=0; n<maxSymbolValue; n++)
- huffWeight[n] = bitsToWeight[CTable[n].nbBits];
-
- /* attempt weights compression by FSE */
- { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
- if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
- op[0] = (BYTE)hSize;
- return hSize+1;
- } }
-
- /* write raw values as 4-bits (max : 15) */
- if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
- if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
- op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
- huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
- for (n=0; n<maxSymbolValue; n+=2)
- op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
- return ((maxSymbolValue+1)/2) + 1;
- }
-
-
- size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
- {
- BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
- U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
- U32 tableLog = 0;
- U32 nbSymbols = 0;
-
- /* get symbol weights */
- CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
-
- /* check result */
- if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
-
- /* Prepare base value per rank */
- { U32 n, nextRankStart = 0;
- for (n=1; n<=tableLog; n++) {
- U32 current = nextRankStart;
- nextRankStart += (rankVal[n] << (n-1));
- rankVal[n] = current;
- } }
-
- /* fill nbBits */
- *hasZeroWeights = 0;
- { U32 n; for (n=0; n<nbSymbols; n++) {
- const U32 w = huffWeight[n];
- *hasZeroWeights |= (w == 0);
- CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
- } }
-
- /* fill val */
- { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
- U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
- { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
- /* determine stating value per rank */
- valPerRank[tableLog+1] = 0; /* for w==0 */
- { U16 min = 0;
- U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- } }
- /* assign value within rank, symbol order */
- { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
- }
-
- *maxSymbolValuePtr = nbSymbols - 1;
- return readSize;
- }
-
- U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
- {
- const HUF_CElt* table = (const HUF_CElt*)symbolTable;
- assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
- return table[symbolValue].nbBits;
- }
-
-
- typedef struct nodeElt_s {
- U32 count;
- U16 parent;
- BYTE byte;
- BYTE nbBits;
- } nodeElt;
-
- static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
- {
- const U32 largestBits = huffNode[lastNonNull].nbBits;
- if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
-
- /* there are several too large elements (at least >= 2) */
- { int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
- int n = (int)lastNonNull;
-
- while (huffNode[n].nbBits > maxNbBits) {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
- n --;
- } /* n stops at huffNode[n].nbBits <= maxNbBits */
- while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */
-
- /* renorm totalCost */
- totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
-
- /* repay normalized cost */
- { U32 const noSymbol = 0xF0F0F0F0;
- U32 rankLast[HUF_TABLELOG_MAX+2];
-
- /* Get pos of last (smallest) symbol per rank */
- memset(rankLast, 0xF0, sizeof(rankLast));
- { U32 currentNbBits = maxNbBits;
- int pos;
- for (pos=n ; pos >= 0; pos--) {
- if (huffNode[pos].nbBits >= currentNbBits) continue;
- currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
- rankLast[maxNbBits-currentNbBits] = (U32)pos;
- } }
-
- while (totalCost > 0) {
- U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
- for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
- U32 const highPos = rankLast[nBitsToDecrease];
- U32 const lowPos = rankLast[nBitsToDecrease-1];
- if (highPos == noSymbol) continue;
- if (lowPos == noSymbol) break;
- { U32 const highTotal = huffNode[highPos].count;
- U32 const lowTotal = 2 * huffNode[lowPos].count;
- if (highTotal <= lowTotal) break;
- } }
- /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
- /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
- while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
- nBitsToDecrease ++;
- totalCost -= 1 << (nBitsToDecrease-1);
- if (rankLast[nBitsToDecrease-1] == noSymbol)
- rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
- huffNode[rankLast[nBitsToDecrease]].nbBits ++;
- if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
- rankLast[nBitsToDecrease] = noSymbol;
- else {
- rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
- rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
- } } /* while (totalCost > 0) */
-
- while (totalCost < 0) { /* Sometimes, cost correction overshoot */
- if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
- while (huffNode[n].nbBits == maxNbBits) n--;
- huffNode[n+1].nbBits--;
- assert(n >= 0);
- rankLast[1] = (U32)(n+1);
- totalCost++;
- continue;
- }
- huffNode[ rankLast[1] + 1 ].nbBits--;
- rankLast[1]++;
- totalCost ++;
- } } } /* there are several too large elements (at least >= 2) */
-
- return maxNbBits;
- }
-
- typedef struct {
- U32 base;
- U32 current;
- } rankPos;
-
- typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-
- #define RANK_POSITION_TABLE_SIZE 32
-
- typedef struct {
- huffNodeTable huffNodeTbl;
- rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
- } HUF_buildCTable_wksp_tables;
-
- static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
- {
- U32 n;
-
- memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
- for (n=0; n<=maxSymbolValue; n++) {
- U32 r = BIT_highbit32(count[n] + 1);
- rankPosition[r].base ++;
- }
- for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base;
- for (n=0; n<32; n++) rankPosition[n].current = rankPosition[n].base;
- for (n=0; n<=maxSymbolValue; n++) {
- U32 const c = count[n];
- U32 const r = BIT_highbit32(c+1) + 1;
- U32 pos = rankPosition[r].current++;
- while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
- huffNode[pos] = huffNode[pos-1];
- pos--;
- }
- huffNode[pos].count = c;
- huffNode[pos].byte = (BYTE)n;
- }
- }
-
-
- /** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
- */
- #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
-
- size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
- {
- HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
- nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
- nodeElt* const huffNode = huffNode0+1;
- int nonNullRank;
- int lowS, lowN;
- int nodeNb = STARTNODE;
- int n, nodeRoot;
-
- /* safety checks */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
- if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
- return ERROR(workSpace_tooSmall);
- if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
- memset(huffNode0, 0, sizeof(huffNodeTable));
-
- /* sort, decreasing order */
- HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
-
- /* init for parents */
- nonNullRank = (int)maxSymbolValue;
- while(huffNode[nonNullRank].count == 0) nonNullRank--;
- lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
- huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
- nodeNb++; lowS-=2;
- for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
- huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
-
- /* create parents */
- while (nodeNb <= nodeRoot) {
- int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
- huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
- nodeNb++;
- }
-
- /* distribute weights (unlimited tree height) */
- huffNode[nodeRoot].nbBits = 0;
- for (n=nodeRoot-1; n>=STARTNODE; n--)
- huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
- for (n=0; n<=nonNullRank; n++)
- huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
-
- /* enforce maxTableLog */
- maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
-
- /* fill result into tree (val, nbBits) */
- { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
- U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
- int const alphabetSize = (int)(maxSymbolValue + 1);
- if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
- for (n=0; n<=nonNullRank; n++)
- nbPerRank[huffNode[n].nbBits]++;
- /* determine stating value per rank */
- { U16 min = 0;
- for (n=(int)maxNbBits; n>0; n--) {
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- } }
- for (n=0; n<alphabetSize; n++)
- tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
- for (n=0; n<alphabetSize; n++)
- tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
- }
-
- return maxNbBits;
- }
-
- /** HUF_buildCTable() :
- * @return : maxNbBits
- * Note : count is used before tree is written, so they can safely overlap
- */
- size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
- {
- HUF_buildCTable_wksp_tables workspace;
- return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
- }
-
- size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
- {
- size_t nbBits = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- nbBits += CTable[s].nbBits * count[s];
- }
- return nbBits >> 3;
- }
-
- int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
- }
- return !bad;
- }
-
- size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
-
- FORCE_INLINE_TEMPLATE void
- HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
- {
- BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
- }
-
- #define HUF_FLUSHBITS(s) BIT_flushBits(s)
-
- #define HUF_FLUSHBITS_1(stream) \
- if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
-
- #define HUF_FLUSHBITS_2(stream) \
- if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
-
- FORCE_INLINE_TEMPLATE size_t
- HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
- {
- const BYTE* ip = (const BYTE*) src;
- BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
- size_t n;
- BIT_CStream_t bitC;
-
- /* init */
- if (dstSize < 8) return 0; /* not enough space to compress */
- { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
- if (HUF_isError(initErr)) return 0; }
-
- n = srcSize & ~3; /* join to mod 4 */
- switch (srcSize & 3)
- {
- case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- /* fall-through */
- case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- /* fall-through */
- case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
- HUF_FLUSHBITS(&bitC);
- /* fall-through */
- case 0 : /* fall-through */
- default: break;
- }
-
- for (; n>0; n-=4) { /* note : n&3==0 at this stage */
- HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
- HUF_FLUSHBITS(&bitC);
- }
-
- return BIT_closeCStream(&bitC);
- }
-
- #if DYNAMIC_BMI2
-
- static TARGET_ATTRIBUTE("bmi2") size_t
- HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
- {
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
- }
-
- static size_t
- HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
- {
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
- }
-
- static size_t
- HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
- {
- if (bmi2) {
- return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
- }
- return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
- }
-
- #else
-
- static size_t
- HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
- {
- (void)bmi2;
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
- }
-
- #endif
-
- size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
- {
- return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
- }
-
-
- static size_t
- HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, int bmi2)
- {
- size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
- const BYTE* ip = (const BYTE*) src;
- const BYTE* const iend = ip + srcSize;
- BYTE* const ostart = (BYTE*) dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
-
- if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
- if (srcSize < 12) return 0; /* no saving possible : too small input */
- op += 6; /* jumpTable */
-
- assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
- MEM_writeLE16(ostart, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
- MEM_writeLE16(ostart+2, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
- MEM_writeLE16(ostart+4, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- assert(op <= oend);
- assert(ip <= iend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
- if (cSize==0) return 0;
- op += cSize;
- }
-
- return (size_t)(op-ostart);
- }
-
- size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
- {
- return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
- }
-
- typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
-
- static size_t HUF_compressCTable_internal(
- BYTE* const ostart, BYTE* op, BYTE* const oend,
- const void* src, size_t srcSize,
- HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
- {
- size_t const cSize = (nbStreams==HUF_singleStream) ?
- HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
- HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
- if (HUF_isError(cSize)) { return cSize; }
- if (cSize==0) { return 0; } /* uncompressible */
- op += cSize;
- /* check compressibility */
- assert(op >= ostart);
- if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
- return (size_t)(op-ostart);
- }
-
- typedef struct {
- unsigned count[HUF_SYMBOLVALUE_MAX + 1];
- HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
- HUF_buildCTable_wksp_tables buildCTable_wksp;
- } HUF_compress_tables_t;
-
- /* HUF_compress_internal() :
- * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
- static size_t
- HUF_compress_internal (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- HUF_nbStreams_e nbStreams,
- void* workSpace, size_t wkspSize,
- HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
- const int bmi2)
- {
- HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
- BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
-
- HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
-
- /* checks & inits */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
- if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
- if (!srcSize) return 0; /* Uncompressed */
- if (!dstSize) return 0; /* cannot fit anything within dst budget */
- if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
- if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
- if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
- if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
-
- /* Heuristic : If old table is valid, use it for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
- nbStreams, oldHufTable, bmi2);
- }
-
- /* Scan input and build symbol stats */
- { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
- if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
- if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
- }
-
- /* Check validity of previous table */
- if ( repeat
- && *repeat == HUF_repeat_check
- && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
- *repeat = HUF_repeat_none;
- }
- /* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
- nbStreams, oldHufTable, bmi2);
- }
-
- /* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
- maxSymbolValue, huffLog,
- &table->buildCTable_wksp, sizeof(table->buildCTable_wksp));
- CHECK_F(maxBits);
- huffLog = (U32)maxBits;
- /* Zero unused symbols in CTable, so we can check it for validity */
- memset(table->CTable + (maxSymbolValue + 1), 0,
- sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
- }
-
- /* Write table description header */
- { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
- /* Check if using previous huffman table is beneficial */
- if (repeat && *repeat != HUF_repeat_none) {
- size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
- size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
- if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
- nbStreams, oldHufTable, bmi2);
- } }
-
- /* Use the new huffman table */
- if (hSize + 12ul >= srcSize) { return 0; }
- op += hSize;
- if (repeat) { *repeat = HUF_repeat_none; }
- if (oldHufTable)
- memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
- }
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
- nbStreams, table->CTable, bmi2);
- }
-
-
- size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
- {
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/);
- }
-
- size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
- {
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize, hufTable,
- repeat, preferRepeat, bmi2);
- }
-
- size_t HUF_compress1X (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog)
- {
- unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
- return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
- }
-
- /* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * provide workspace to generate compression tables */
- size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
- {
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/);
- }
-
- /* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * re-use an existing huffman compression table */
- size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
- {
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
- hufTable, repeat, preferRepeat, bmi2);
- }
-
- size_t HUF_compress2 (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog)
- {
- unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
- return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
- }
-
- size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
- {
- return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
- }