zstdlib 0.3.0-x64-mingw32 → 0.4.0-x64-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +7 -0
  3. data/README.md +1 -1
  4. data/Rakefile +1 -1
  5. data/ext/zstdlib/extconf.rb +1 -1
  6. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/bitstream.h +4 -0
  7. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/compiler.h +3 -3
  8. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/cpu.h +0 -0
  9. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/debug.c +0 -0
  10. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/debug.h +0 -0
  11. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/entropy_common.c +0 -0
  12. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/error_private.c +0 -0
  13. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/error_private.h +0 -0
  14. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/fse.h +0 -0
  15. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/fse_decompress.c +0 -0
  16. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/huf.h +0 -0
  17. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/mem.h +1 -1
  18. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/pool.c +0 -0
  19. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/pool.h +0 -0
  20. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/threading.c +0 -0
  21. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/threading.h +0 -0
  22. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/xxhash.c +8 -2
  23. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/xxhash.h +0 -0
  24. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/zstd_common.c +0 -0
  25. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/zstd_errors.h +0 -0
  26. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/zstd_internal.h +5 -3
  27. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/fse_compress.c +0 -0
  28. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/hist.c +0 -0
  29. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/hist.h +0 -0
  30. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/huf_compress.c +0 -0
  31. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress.c +14 -12
  32. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_internal.h +45 -15
  33. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_literals.c +0 -0
  34. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_literals.h +0 -0
  35. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_sequences.c +0 -0
  36. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_sequences.h +0 -0
  37. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_double_fast.c +2 -3
  38. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_double_fast.h +0 -0
  39. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_fast.c +10 -8
  40. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_fast.h +0 -0
  41. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_lazy.c +30 -26
  42. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_lazy.h +0 -0
  43. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_ldm.c +0 -0
  44. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_ldm.h +0 -0
  45. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_opt.c +5 -5
  46. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_opt.h +0 -0
  47. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstdmt_compress.c +0 -0
  48. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstdmt_compress.h +0 -0
  49. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/huf_decompress.c +0 -0
  50. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_ddict.c +0 -0
  51. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_ddict.h +0 -0
  52. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress.c +2 -1
  53. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress_block.c +5 -2
  54. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress_block.h +0 -0
  55. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress_internal.h +0 -0
  56. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/zstd.h +10 -8
  57. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzclose.c +0 -0
  58. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzcompatibility.h +0 -0
  59. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzguts.h +0 -0
  60. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzlib.c +0 -0
  61. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzread.c +0 -0
  62. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/gzwrite.c +0 -0
  63. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/zstd_zlibwrapper.c +0 -0
  64. data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/zlibWrapper/zstd_zlibwrapper.h +0 -0
  65. data/lib/2.2/zstdlib.so +0 -0
  66. data/lib/2.3/zstdlib.so +0 -0
  67. data/lib/2.4/zstdlib.so +0 -0
  68. data/lib/2.5/zstdlib.so +0 -0
  69. data/lib/2.6/zstdlib.so +0 -0
  70. metadata +62 -62
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e0dd179cd1e8551f04c912d0fac05cdbc046cbd6509af74db0f29378afe267be
- data.tar.gz: 24964962448bae96154b6f2bd68fd7c956d4d0dad0c4b386faf898acfd598574
+ metadata.gz: 7961596fedcd960dbafb1f54c2979cb65506619273a6e9a93d2e96165d2f40fb
+ data.tar.gz: 5ebf420761484d6c0dfd87be293e1cce9613e57212abbd17e1806a9a4bfe49a6
  SHA512:
- metadata.gz: d9166fe5f597cd9a0d495d71c06942e16b7c4ec653e579e5d3298c0b050618d6dd1714b1b93a58c13e6deaa05613d7dec4096930e3c43c6c828a7fdafda035d5
- data.tar.gz: 3fbf8fa4ecf05dedd34c74f5230afdff3c357772690a3ddcf83af6687e23e5d4e1f79d9ebf3c3d66f98dbd8594482ee93d677e434e34aa9717d89d9a1efeae81
+ metadata.gz: 0bf6c0688c63e2521ae8d083744b7a91c19bdee700b38a4aa01b4463b99e120dd54fd034ae2cbe6226516c4cd32e29dc79aab811fe6b9f3f540f5296c995f77d
+ data.tar.gz: 4b09a7b3a8bc574e067cf087ed20e62bae82328638c69e52c92b819fc8f210bd92c7a90a85befd70f250c187e45b49849677525239b562af818c67e0cbfffe85
data/CHANGES.md CHANGED
@@ -1,3 +1,10 @@
+ # 0.4.0
+
+ Zstd version update to `1.4.3`
+
+ Relocated to GitHub.
+
+
  # 0.3.0

  Zstd version update to `1.4.2`
data/README.md CHANGED
@@ -75,7 +75,7 @@ The `BEST_SPEED` constant remains unchanged.

  ## Availability

- _zstdlib_ home page on [bitbucket.org](https://bitbucket.org/fougas/zstdlib).
+ _zstdlib_ home page on [GitHub](https://github.com/okhlybov/zstdlib).

  Source code and Windows-specific multi-versioned binary gems can be obtained from [rubygems.org](https://rubygems.org/gems/zstdlib).

data/Rakefile CHANGED
@@ -25,5 +25,5 @@ end
  task :fat do
  require 'rake_compiler_dock'
  sh 'bundle package'
- RakeCompilerDock.sh 'bundle --local && rake cross native gem'
+ RakeCompilerDock.sh 'gem install bundle && bundle --local && rake cross native gem'
  end
data/ext/zstdlib/extconf.rb CHANGED
@@ -6,7 +6,7 @@ require 'fileutils'
  include RbConfig
  include FileUtils

- ZSTD_VERSION = '1.4.2'
+ ZSTD_VERSION = '1.4.3'
  ZLIB_VERSION = '1.2.11'
  RB_VERSION = CONFIG['MAJOR']+'.'+CONFIG['MINOR']
  ZMOD_VERSION = RB_VERSION >= '2.3' ? '2.6' : RB_VERSION # Review requirements with every new zlib module release!
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/bitstream.h CHANGED
@@ -57,6 +57,8 @@ extern "C" {
  =========================================*/
  #if defined(__BMI__) && defined(__GNUC__)
  # include <immintrin.h> /* support for bextr (experimental) */
+ #elif defined(__ICCARM__)
+ # include <intrinsics.h>
  #endif

  #define STREAM_ACCUMULATOR_MIN_32 25
@@ -163,6 +165,8 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
  return (unsigned) r;
  # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
  return 31 - __builtin_clz (val);
+ # elif defined(__ICCARM__) /* IAR Intrinsic */
+ return 31 - __CLZ(val);
  # else /* Software version */
  static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
  11, 14, 16, 18, 22, 25, 3, 30,
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/compiler.h CHANGED
@@ -23,7 +23,7 @@
  # define INLINE_KEYWORD
  #endif

- #if defined(__GNUC__)
+ #if defined(__GNUC__) || defined(__ICCARM__)
  # define FORCE_INLINE_ATTR __attribute__((always_inline))
  #elif defined(_MSC_VER)
  # define FORCE_INLINE_ATTR __forceinline
@@ -65,7 +65,7 @@
  #ifdef _MSC_VER
  # define FORCE_NOINLINE static __declspec(noinline)
  #else
- # ifdef __GNUC__
+ # if defined(__GNUC__) || defined(__ICCARM__)
  # define FORCE_NOINLINE static __attribute__((__noinline__))
  # else
  # define FORCE_NOINLINE static
@@ -76,7 +76,7 @@
  #ifndef __has_attribute
  #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
  #endif
- #if defined(__GNUC__)
+ #if defined(__GNUC__) || defined(__ICCARM__)
  # define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
  #else
  # define TARGET_ATTRIBUTE(target)
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/mem.h CHANGED
@@ -102,7 +102,7 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
  #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
  # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
  # define MEM_FORCE_MEMORY_ACCESS 2
- # elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+ # elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
  # define MEM_FORCE_MEMORY_ACCESS 1
  # endif
  #endif
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/xxhash.c CHANGED
@@ -53,7 +53,8 @@
  # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
  # define XXH_FORCE_MEMORY_ACCESS 2
  # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
- (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
+ defined(__ICCARM__)
  # define XXH_FORCE_MEMORY_ACCESS 1
  # endif
  #endif
@@ -120,7 +121,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
  # define INLINE_KEYWORD
  #endif

- #if defined(__GNUC__)
+ #if defined(__GNUC__) || defined(__ICCARM__)
  # define FORCE_INLINE_ATTR __attribute__((always_inline))
  #elif defined(_MSC_VER)
  # define FORCE_INLINE_ATTR __forceinline
@@ -206,7 +207,12 @@ static U64 XXH_read64(const void* memPtr)
  # define XXH_rotl32(x,r) _rotl(x,r)
  # define XXH_rotl64(x,r) _rotl64(x,r)
  #else
+ #if defined(__ICCARM__)
+ # include <intrinsics.h>
+ # define XXH_rotl32(x,r) __ROR(x,(32 - r))
+ #else
  # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+ #endif
  # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
  #endif

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/common/zstd_internal.h CHANGED
@@ -56,9 +56,9 @@ extern "C" {
  /**
  * Return the specified error if the condition evaluates to true.
  *
- * In debug modes, prints additional information. In order to do that
- * (particularly, printing the conditional that failed), this can't just wrap
- * RETURN_ERROR().
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
  */
  #define RETURN_ERROR_IF(cond, err, ...) \
  if (cond) { \
@@ -324,6 +324,8 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
  return (unsigned)r;
  # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
  return 31 - __builtin_clz(val);
+ # elif defined(__ICCARM__) /* IAR Intrinsic */
+ return 31 - __CLZ(val);
  # else /* Software version */
  static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
  U32 v = val;
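The IAR branch added in both `bitstream.h` and `zstd_internal.h` mirrors the existing GCC path: the index of a value's highest set bit is computed as 31 minus its count of leading zeros. A minimal standalone check of that identity, not part of the diff (it uses the GCC builtin; `__CLZ` is the IAR equivalent):

```c
#include <assert.h>
#include <stdint.h>

/* Reference: find the highest set bit by shifting right until the value collapses. */
static unsigned highbit32_ref(uint32_t val)
{
    unsigned r = 0;
    assert(val != 0);          /* undefined for 0, as with the clz-based paths */
    while (val >>= 1) r++;
    return r;
}

/* Intrinsic formulation used in the diff: 31 - clz(val). */
static unsigned highbit32_clz(uint32_t val)
{
    return 31u - (unsigned)__builtin_clz(val);
}

int main(void)
{
    uint32_t v;
    for (v = 1; v < 1000000u; v++) {
        assert(highbit32_ref(v) == highbit32_clz(v));
    }
    return 0;
}
```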
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress.c CHANGED
@@ -1955,7 +1955,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  BYTE* const ostart = (BYTE*)dst;
  BYTE* const oend = ostart + dstCapacity;
  BYTE* op = ostart;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
  BYTE* seqHead;
  BYTE* lastNCount = NULL;

@@ -1964,7 +1964,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,

  /* Compress literals */
  { const BYTE* const literals = seqStorePtr->litStart;
- size_t const litSize = seqStorePtr->lit - literals;
+ size_t const litSize = (size_t)(seqStorePtr->lit - literals);
  size_t const cSize = ZSTD_compressLiterals(
  &prevEntropy->huf, &nextEntropy->huf,
  cctxParams->cParams.strategy,
@@ -1991,7 +1991,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  if (nbSeq==0) {
  /* Copy the old tables over as if we repeated them */
  memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
- return op - ostart;
+ return (size_t)(op - ostart);
  }

  /* seqHead : flags for FSE encoding type */
@@ -2012,7 +2012,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  ZSTD_defaultAllowed, strategy);
  assert(set_basic < set_compressed && set_rle < set_compressed);
  assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+ { size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
  count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
  prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
  workspace, wkspSize);
@@ -2035,7 +2035,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  OF_defaultNorm, OF_defaultNormLog,
  defaultPolicy, strategy);
  assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+ { size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
  count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
  prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
  workspace, wkspSize);
@@ -2056,7 +2056,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  ML_defaultNorm, ML_defaultNormLog,
  ZSTD_defaultAllowed, strategy);
  assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+ { size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
  count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
  prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
  workspace, wkspSize);
@@ -2070,7 +2070,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

  { size_t const bitstreamSize = ZSTD_encodeSequences(
- op, oend - op,
+ op, (size_t)(oend - op),
  CTable_MatchLength, mlCodeTable,
  CTable_OffsetBits, ofCodeTable,
  CTable_LitLength, llCodeTable,
@@ -2097,7 +2097,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
  }

  DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
- return op - ostart;
+ return (size_t)(op - ostart);
  }

  MEM_STATIC size_t
@@ -2270,7 +2270,8 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
  {
  size_t cSize;
  DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
- (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate);
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);

  { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
  FORWARD_IF_ERROR(bss);
@@ -2538,8 +2539,9 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)

  size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  {
- size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
- RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
+ DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+ { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong); }

  return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
  }
@@ -2564,7 +2566,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
  if (srcSize <= HASH_READ_SIZE) return 0;

  while (iend - ip > HASH_READ_SIZE) {
- size_t const remaining = iend - ip;
+ size_t const remaining = (size_t)(iend - ip);
  size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
  const BYTE* const ichunk = ip + chunk;

data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_compress_internal.h CHANGED
@@ -134,9 +134,15 @@ typedef struct {
  typedef struct ZSTD_matchState_t ZSTD_matchState_t;
  struct ZSTD_matchState_t {
  ZSTD_window_t window; /* State for window round buffer management */
- U32 loadedDictEnd; /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), effectively same value as dictSize, since referential starts from zero */
+ U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+ * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
+ * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+ * When dict referential is copied into active context (i.e. not attached),
+ * loadedDictEnd == dictSize, since referential starts from zero.
+ */
  U32 nextToUpdate; /* index from which to continue table update */
- U32 hashLog3; /* dispatch table : larger == faster, more memory */
+ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
  U32* hashTable;
  U32* hashTable3;
  U32* chainTable;
@@ -350,7 +356,7 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
  /* copy Literals */
  assert(seqStorePtr->maxNbLit <= 128 KB);
  assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
- ZSTD_wildcopy(seqStorePtr->lit, literals, litLength, ZSTD_no_overlap);
+ ZSTD_wildcopy(seqStorePtr->lit, literals, (ptrdiff_t)litLength, ZSTD_no_overlap);
  seqStorePtr->lit += litLength;

  /* literal Length */
@@ -763,24 +769,37 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,

  /* Similar to ZSTD_window_enforceMaxDist(),
  * but only invalidates dictionary
- * when input progresses beyond window size. */
+ * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
+ * loadedDictEnd uses same referential as window->base
+ * maxDist is the window size */
  MEM_STATIC void
- ZSTD_checkDictValidity(ZSTD_window_t* window,
+ ZSTD_checkDictValidity(const ZSTD_window_t* window,
  const void* blockEnd,
  U32 maxDist,
  U32* loadedDictEndPtr,
  const ZSTD_matchState_t** dictMatchStatePtr)
  {
- U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
- U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
- DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
- (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
-
- if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) {
- /* On reaching window size, dictionaries are invalidated */
- if (loadedDictEndPtr) *loadedDictEndPtr = 0;
- if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
- }
+ assert(loadedDictEndPtr != NULL);
+ assert(dictMatchStatePtr != NULL);
+ { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = *loadedDictEndPtr;
+ DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+ assert(blockEndIdx >= loadedDictEnd);
+
+ if (blockEndIdx > loadedDictEnd + maxDist) {
+ /* On reaching window size, dictionaries are invalidated.
+ * For simplification, if window size is reached anywhere within next block,
+ * the dictionary is invalidated for the full block.
+ */
+ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
+ *loadedDictEndPtr = 0;
+ *dictMatchStatePtr = NULL;
+ } else {
+ if (*loadedDictEndPtr != 0) {
+ DEBUGLOG(6, "dictionary considered valid for current block");
+ } } }
  }

  /**
@@ -822,6 +841,17 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
  return contiguous;
  }

+ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+ {
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.lowLimit;
+ U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+ }
+
+

  /* debug functions */
  #if (DEBUGLEVEL>=2)
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_double_fast.c CHANGED
@@ -65,6 +65,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
  const U32 lowestValid = ms->window.dictLimit;
  const U32 maxDistance = 1U << cParams->windowLog;
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
  const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
  const BYTE* const prefixLowest = base + prefixLowestIndex;
  const BYTE* const iend = istart + srcSize;
@@ -369,9 +370,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* const ilimit = iend - 8;
  const BYTE* const base = ms->window.base;
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
- const U32 maxDistance = 1U << cParams->windowLog;
- const U32 lowestValid = ms->window.lowLimit;
- const U32 lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
  const U32 dictStartIndex = lowLimit;
  const U32 dictLimit = ms->window.dictLimit;
  const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_fast.c CHANGED
@@ -71,6 +71,7 @@ size_t ZSTD_compressBlock_fast_generic(
  U32 offsetSaved = 0;

  /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
  ip0 += (ip0 == prefixStart);
  ip1 = ip0 + 1;
  {
@@ -239,6 +240,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

  /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
  ip += (dictAndPrefixLength == 0);
  /* dictMatchState repCode checks don't currently handle repCode == 0
  * disabling. */
@@ -379,9 +381,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  const BYTE* ip = istart;
  const BYTE* anchor = istart;
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
- const U32 maxDistance = 1U << cParams->windowLog;
- const U32 validLow = ms->window.lowLimit;
- const U32 lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow;
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
  const U32 dictStartIndex = lowLimit;
  const BYTE* const dictStart = dictBase + dictStartIndex;
  const U32 dictLimit = ms->window.dictLimit;
@@ -392,6 +392,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  const BYTE* const ilimit = iend - 8;
  U32 offset_1=rep[0], offset_2=rep[1];

+ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
+
  /* switch to "regular" variant if extDict is invalidated due to maxDistance */
  if (prefixStartIndex == dictStartIndex)
  return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
@@ -412,8 +414,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(

  if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
  && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
  ip++;
  ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
  } else {
@@ -423,8 +425,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  ip += ((ip-anchor) >> kSearchStrength) + stepSize;
  continue;
  }
- { const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
  U32 offset;
  mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
  while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
@@ -451,7 +453,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
  const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
  ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
  hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
  ip += repLength2;
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_lazy.c CHANGED
@@ -242,9 +242,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,

  const BYTE* const base = ms->window.base;
  U32 const current = (U32)(ip-base);
- U32 const maxDistance = 1U << cParams->windowLog;
- U32 const windowValid = ms->window.lowLimit;
- U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);

  U32* const bt = ms->chainTable;
  U32 const btLog = cParams->chainLog - 1;
@@ -497,8 +495,10 @@ size_t ZSTD_HcFindBestMatch_generic (
  const BYTE* const dictEnd = dictBase + dictLimit;
  const U32 current = (U32)(ip-base);
  const U32 maxDistance = 1U << cParams->windowLog;
- const U32 lowValid = ms->window.lowLimit;
- const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
  const U32 minChain = current > chainSize ? current - chainSize : 0;
  U32 nbAttempts = 1U << cParams->searchLog;
  size_t ml=4-1;
@@ -619,12 +619,14 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
  /* *******************************
  * Common parser - lazy strategy
  *********************************/
- FORCE_INLINE_TEMPLATE
- size_t ZSTD_compressBlock_lazy_generic(
+ typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
+
+ FORCE_INLINE_TEMPLATE size_t
+ ZSTD_compressBlock_lazy_generic(
  ZSTD_matchState_t* ms, seqStore_t* seqStore,
  U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize,
- const U32 searchMethod, const U32 depth,
+ const searchMethod_e searchMethod, const U32 depth,
  ZSTD_dictMode_e const dictMode)
  {
  const BYTE* const istart = (const BYTE*)src;
@@ -640,8 +642,10 @@ size_t ZSTD_compressBlock_lazy_generic(
  ZSTD_matchState_t* ms,
  const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
  searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
- (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
- (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
+ (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
+ : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+ (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
+ : ZSTD_HcFindBestMatch_selectMLS);
  U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

  const ZSTD_matchState_t* const dms = ms->dictMatchState;
@@ -850,7 +854,7 @@ _storeSequence:
  rep[1] = offset_2 ? offset_2 : savedOffset;

  /* Return the last literals size */
- return iend - anchor;
+ return (size_t)(iend - anchor);
  }


@@ -858,56 +862,56 @@ size_t ZSTD_compressBlock_btlazy2(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
  }

  size_t ZSTD_compressBlock_lazy2(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
  }

  size_t ZSTD_compressBlock_lazy(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
  }

  size_t ZSTD_compressBlock_greedy(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
  }

  size_t ZSTD_compressBlock_btlazy2_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
  }

  size_t ZSTD_compressBlock_lazy2_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
  }

  size_t ZSTD_compressBlock_lazy_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
  }

  size_t ZSTD_compressBlock_greedy_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
  }


@@ -916,7 +920,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
  ZSTD_matchState_t* ms, seqStore_t* seqStore,
  U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize,
- const U32 searchMethod, const U32 depth)
+ const searchMethod_e searchMethod, const U32 depth)
  {
  const BYTE* const istart = (const BYTE*)src;
  const BYTE* ip = istart;
@@ -934,7 +938,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
  typedef size_t (*searchMax_f)(
  ZSTD_matchState_t* ms,
  const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+ searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;

  U32 offset_1 = rep[0], offset_2 = rep[1];

@@ -1075,7 +1079,7 @@ _storeSequence:
  rep[1] = offset_2;

  /* Return the last literals size */
- return iend - anchor;
+ return (size_t)(iend - anchor);
  }


@@ -1083,7 +1087,7 @@ size_t ZSTD_compressBlock_greedy_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
  }

  size_t ZSTD_compressBlock_lazy_extDict(
@@ -1091,7 +1095,7 @@ size_t ZSTD_compressBlock_lazy_extDict(
  void const* src, size_t srcSize)

  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
  }

  size_t ZSTD_compressBlock_lazy2_extDict(
@@ -1099,7 +1103,7 @@ size_t ZSTD_compressBlock_lazy2_extDict(
  void const* src, size_t srcSize)

  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
  }

  size_t ZSTD_compressBlock_btlazy2_extDict(
@@ -1107,5 +1111,5 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
  void const* src, size_t srcSize)

  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
  }
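A recurring fix across the compressor hunks above (zstd_compress.c, zstd_lazy.c) is casting pointer differences to `size_t` once they are known to be non-negative, since expressions such as `op - ostart` or `iend - anchor` have the signed type `ptrdiff_t`. A tiny, illustrative sketch of the pattern (the function name is illustrative, not zstd's):

```c
#include <assert.h>
#include <stddef.h>

/* Number of bytes emitted so far between ostart and op. */
static size_t bytes_written(const unsigned char* ostart, const unsigned char* op)
{
    assert(op >= ostart);            /* the difference must be non-negative      */
    return (size_t)(op - ostart);    /* explicit ptrdiff_t -> size_t conversion  */
}
```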
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/compress/zstd_opt.c CHANGED
@@ -552,7 +552,6 @@ U32 ZSTD_insertBtAndGetAllMatches (
  {
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
  U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
- U32 const maxDistance = 1U << cParams->windowLog;
  const BYTE* const base = ms->window.base;
  U32 const current = (U32)(ip-base);
  U32 const hashLog = cParams->hashLog;
@@ -569,8 +568,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
  const BYTE* const dictEnd = dictBase + dictLimit;
  const BYTE* const prefixStart = base + dictLimit;
  U32 const btLow = (btMask >= current) ? 0 : current - btMask;
- U32 const windowValid = ms->window.lowLimit;
- U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
  U32 const matchLow = windowLow ? windowLow : 1;
  U32* smallerPtr = bt + 2*(current&btMask);
  U32* largerPtr = bt + 2*(current&btMask) + 1;
@@ -674,19 +672,21 @@ U32 ZSTD_insertBtAndGetAllMatches (

  while (nbCompares-- && (matchIndex >= matchLow)) {
  U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  const BYTE* match;
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  assert(current > matchIndex);

  if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
  assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
  match = base + matchIndex;
+ if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
  matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
  } else {
  match = dictBase + matchIndex;
+ assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
  matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
  if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* prepare for match[matchLength] */
+ match = base + matchIndex; /* prepare for match[matchLength] read */
  }

  if (matchLength > bestLength) {
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress.c CHANGED
@@ -574,9 +574,10 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
  }

  /** ZSTD_insertBlock() :
- insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
  size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
  {
+ DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
  ZSTD_checkContinuity(dctx, blockStart);
  dctx->previousDstEnd = (const char*)blockStart + blockSize;
  return blockSize;
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/decompress/zstd_decompress_block.c CHANGED
@@ -79,6 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
  size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
  const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
  {
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
  RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);

  { const BYTE* const istart = (const BYTE*) src;
@@ -87,6 +88,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
  switch(litEncType)
  {
  case set_repeat:
+ DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
  RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
  /* fall-through */

@@ -116,7 +118,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
  /* 2 - 2 - 18 - 18 */
  lhSize = 5;
  litSize = (lhc >> 4) & 0x3FFFF;
- litCSize = (lhc >> 22) + (istart[4] << 10);
+ litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
  break;
  }
  RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
@@ -391,7 +393,8 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
  symbolNext[s] = 1;
  } else {
  if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
- symbolNext[s] = normalizedCounter[s];
+ assert(normalizedCounter[s]>=0);
+ symbolNext[s] = (U16)normalizedCounter[s];
  } } }
  memcpy(dt, &DTableH, sizeof(DTableH));
  }
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.4.3}/lib/zstd.h CHANGED
@@ -71,7 +71,7 @@ extern "C" {
  /*------ Version ------*/
  #define ZSTD_VERSION_MAJOR 1
  #define ZSTD_VERSION_MINOR 4
- #define ZSTD_VERSION_RELEASE 2
+ #define ZSTD_VERSION_RELEASE 3

  #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
  ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
@@ -1909,7 +1909,7 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
  /*!
  Block functions produce and decode raw zstd blocks, without frame metadata.
  Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
- User will have to take in charge required information to regenerate data, such as compressed and content sizes.
+ But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.

  A few rules to respect :
  - Compressing and decompressing require a context structure
@@ -1920,12 +1920,14 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
  + copyCCtx() and copyDCtx() can be used too
  - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
  + If input is larger than a block size, it's necessary to split input data into multiple blocks
- + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
- Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
- - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
- In which case, nothing is produced into `dst` !
- + User must test for such outcome and deal directly with uncompressed data
- + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such outcome and deal directly with uncompressed data
+ + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
+ Doing so would mess up with statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
  + In case of multiple successive blocks, should some of them be uncompressed,
  decoder must be informed of their existence in order to follow proper history.
  Use ZSTD_insertBlock() for such a case.
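The rules documented above map directly onto code. Below is a minimal, illustrative sketch (not part of the gem) of a single-block round trip, assuming `cctx`/`dctx` were already initialized with `ZSTD_compressBegin()` / `ZSTD_decompressBegin()` and that `srcSize <= ZSTD_getBlockSize(cctx)`:

```c
#define ZSTD_STATIC_LINKING_ONLY   /* block-level API lives behind this macro */
#include <zstd.h>
#include <string.h>

/* Compress one block; if zstd reports it as not compressible (return value 0),
 * keep the raw bytes and tell the decoder via ZSTD_insertBlock() so its
 * history stays consistent. Returns 0 on success, -1 on error. */
static int roundtrip_block(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                           const void* src, size_t srcSize,
                           void* compressed, size_t compressedCapacity,
                           void* out, size_t outCapacity)
{
    size_t const cSize = ZSTD_compressBlock(cctx, compressed, compressedCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) return -1;

    if (cSize == 0) {                          /* not compressible: dst untouched */
        if (srcSize > outCapacity) return -1;
        memcpy(out, src, srcSize);             /* deal with the uncompressed data */
        ZSTD_insertBlock(dctx, out, srcSize);  /* keep decoder history in sync    */
        return 0;
    }

    {   size_t const dSize = ZSTD_decompressBlock(dctx, out, outCapacity, compressed, cSize);
        if (ZSTD_isError(dSize) || dSize != srcSize) return -1;
    }
    return 0;
}
```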
data/lib/2.2/zstdlib.so CHANGED
Binary file
data/lib/2.3/zstdlib.so CHANGED
Binary file
data/lib/2.4/zstdlib.so CHANGED
Binary file
data/lib/2.5/zstdlib.so CHANGED
Binary file
data/lib/2.6/zstdlib.so CHANGED
Binary file
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: zstdlib
  version: !ruby/object:Gem::Version
- version: 0.3.0
+ version: 0.4.0
  platform: x64-mingw32
  authors:
  - Oleg A. Khlybov
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-08-09 00:00:00.000000000 Z
+ date: 2019-09-03 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -127,65 +127,65 @@ files:
  - ext/zstdlib/zlib.mk
  - ext/zstdlib/zlibwrapper.mk
  - ext/zstdlib/zlibwrapper/zlibwrapper.c
- - ext/zstdlib/zstd-1.4.2/lib/common/bitstream.h
- - ext/zstdlib/zstd-1.4.2/lib/common/compiler.h
- - ext/zstdlib/zstd-1.4.2/lib/common/cpu.h
- - ext/zstdlib/zstd-1.4.2/lib/common/debug.c
- - ext/zstdlib/zstd-1.4.2/lib/common/debug.h
- - ext/zstdlib/zstd-1.4.2/lib/common/entropy_common.c
- - ext/zstdlib/zstd-1.4.2/lib/common/error_private.c
- - ext/zstdlib/zstd-1.4.2/lib/common/error_private.h
- - ext/zstdlib/zstd-1.4.2/lib/common/fse.h
- - ext/zstdlib/zstd-1.4.2/lib/common/fse_decompress.c
- - ext/zstdlib/zstd-1.4.2/lib/common/huf.h
- - ext/zstdlib/zstd-1.4.2/lib/common/mem.h
- - ext/zstdlib/zstd-1.4.2/lib/common/pool.c
- - ext/zstdlib/zstd-1.4.2/lib/common/pool.h
- - ext/zstdlib/zstd-1.4.2/lib/common/threading.c
- - ext/zstdlib/zstd-1.4.2/lib/common/threading.h
- - ext/zstdlib/zstd-1.4.2/lib/common/xxhash.c
- - ext/zstdlib/zstd-1.4.2/lib/common/xxhash.h
- - ext/zstdlib/zstd-1.4.2/lib/common/zstd_common.c
- - ext/zstdlib/zstd-1.4.2/lib/common/zstd_errors.h
- - ext/zstdlib/zstd-1.4.2/lib/common/zstd_internal.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/fse_compress.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/hist.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/hist.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/huf_compress.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress_internal.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress_literals.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress_literals.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress_sequences.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress_sequences.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_double_fast.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_double_fast.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_fast.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_fast.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_ldm.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_ldm.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_opt.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstd_opt.h
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstdmt_compress.c
- - ext/zstdlib/zstd-1.4.2/lib/compress/zstdmt_compress.h
- - ext/zstdlib/zstd-1.4.2/lib/decompress/huf_decompress.c
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_ddict.c
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_ddict.h
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_decompress.c
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_decompress_block.c
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_decompress_block.h
- - ext/zstdlib/zstd-1.4.2/lib/decompress/zstd_decompress_internal.h
- - ext/zstdlib/zstd-1.4.2/lib/zstd.h
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzclose.c
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzcompatibility.h
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzguts.h
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzlib.c
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzread.c
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/gzwrite.c
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/zstd_zlibwrapper.c
- - ext/zstdlib/zstd-1.4.2/zlibWrapper/zstd_zlibwrapper.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/bitstream.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/compiler.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/cpu.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/debug.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/debug.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/entropy_common.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/error_private.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/error_private.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/fse.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/fse_decompress.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/huf.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/mem.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/pool.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/pool.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/threading.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/threading.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/xxhash.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/xxhash.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/zstd_common.c
+ - ext/zstdlib/zstd-1.4.3/lib/common/zstd_errors.h
+ - ext/zstdlib/zstd-1.4.3/lib/common/zstd_internal.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/fse_compress.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/hist.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/hist.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/huf_compress.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress_internal.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress_literals.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress_literals.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress_sequences.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_compress_sequences.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_double_fast.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_double_fast.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_fast.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_fast.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_lazy.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_lazy.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_ldm.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_ldm.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_opt.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstd_opt.h
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstdmt_compress.c
+ - ext/zstdlib/zstd-1.4.3/lib/compress/zstdmt_compress.h
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/huf_decompress.c
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_ddict.c
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_ddict.h
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_decompress.c
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_decompress_block.c
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_decompress_block.h
+ - ext/zstdlib/zstd-1.4.3/lib/decompress/zstd_decompress_internal.h
+ - ext/zstdlib/zstd-1.4.3/lib/zstd.h
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzclose.c
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzcompatibility.h
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzguts.h
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzlib.c
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzread.c
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/gzwrite.c
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/zstd_zlibwrapper.c
+ - ext/zstdlib/zstd-1.4.3/zlibWrapper/zstd_zlibwrapper.h
  - ext/zstdlib/zstd.mk
  - lib/2.2/zstdlib.so
  - lib/2.3/zstdlib.so
@@ -194,7 +194,7 @@ files:
  - lib/2.6/zstdlib.so
  - lib/zstdlib.rb
  - test/zstdlib_test.rb
- homepage: https://bitbucket.org/fougas/zstdlib
+ homepage: https://github.com/okhlybov/zstdlib
  licenses:
  - BSD-3-Clause
  metadata: {}