zstdlib 0.7.0-x86-mingw32 → 0.8.0-x86-mingw32

Files changed (81)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +5 -0
  3. data/ext/zstdlib/extconf.rb +1 -1
  4. data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
  5. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/bitstream.h +25 -16
  6. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/compiler.h +118 -4
  7. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/cpu.h +1 -3
  8. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.c +1 -1
  9. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.h +12 -19
  10. data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
  11. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.c +2 -1
  12. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.h +3 -3
  13. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse.h +40 -12
  14. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse_decompress.c +139 -22
  15. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/huf.h +29 -7
  16. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/mem.h +69 -98
  17. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.c +23 -17
  18. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.h +2 -2
  19. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.c +6 -5
  20. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.h +0 -0
  21. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.c +20 -60
  22. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.h +2 -2
  23. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
  24. data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
  25. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_internal.h +105 -62
  26. data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
  27. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/fse_compress.c +31 -24
  28. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.c +27 -29
  29. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.h +2 -2
  30. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/huf_compress.c +265 -126
  31. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress.c +2843 -728
  32. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +305 -63
  33. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +8 -8
  34. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +1 -1
  35. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +29 -7
  36. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +1 -1
  37. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.c +22 -295
  38. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.h +1 -1
  39. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_cwksp.h +204 -67
  40. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +25 -25
  41. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +1 -1
  42. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.c +23 -23
  43. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.h +1 -1
  44. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
  45. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
  46. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.c +314 -211
  47. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.h +9 -2
  48. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
  49. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.c +191 -46
  50. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
  51. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +93 -415
  52. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
  53. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/huf_decompress.c +342 -239
  54. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +9 -9
  55. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +2 -2
  56. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +369 -87
  57. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +191 -75
  58. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +6 -3
  59. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +27 -11
  60. data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
  61. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/zstd.h +568 -126
  62. data/ext/zstdlib/{zstd-1.4.5/lib/common → zstd-1.5.0/lib}/zstd_errors.h +2 -1
  63. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzclose.c +0 -0
  64. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
  65. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
  66. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzlib.c +0 -0
  67. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzread.c +0 -0
  68. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzwrite.c +0 -0
  69. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +126 -44
  70. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
  71. data/lib/2.2/zstdlib.so +0 -0
  72. data/lib/2.3/zstdlib.so +0 -0
  73. data/lib/2.4/zstdlib.so +0 -0
  74. data/lib/2.5/zstdlib.so +0 -0
  75. data/lib/2.6/zstdlib.so +0 -0
  76. data/lib/2.7/zstdlib.so +0 -0
  77. metadata +69 -64
  78. data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
  79. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
  80. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
  81. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/bitstream.h
@@ -1,7 +1,7 @@
 /* ******************************************************************
  * bitstream
  * Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  *
  * You can contact the author at :
  * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -17,7 +17,6 @@
 #if defined (__cplusplus)
 extern "C" {
 #endif
-
 /*
 *  This API consists of small unitary functions, which must be inlined for best performance.
 *  Since link-time-optimization is not available for all compilers,
@@ -36,10 +35,12 @@ extern "C" {
 /*=========================================
 *  Target specific
 =========================================*/
-#if defined(__BMI__) && defined(__GNUC__)
-#  include <immintrin.h>   /* support for bextr (experimental) */
-#elif defined(__ICCARM__)
-#  include <intrinsics.h>
+#ifndef ZSTD_NO_INTRINSICS
+#  if defined(__BMI__) && defined(__GNUC__)
+#    include <immintrin.h>   /* support for bextr (experimental) */
+#  elif defined(__ICCARM__)
+#    include <intrinsics.h>
+#  endif
 #endif
 
 #define STREAM_ACCUMULATOR_MIN_32  25
@@ -141,8 +142,12 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
     assert(val != 0);
     {
 #   if defined(_MSC_VER)   /* Visual */
-        unsigned long r=0;
-        return _BitScanReverse ( &r, val ) ? (unsigned)r : 0;
+#     if STATIC_BMI2 == 1
+        return _lzcnt_u32(val) ^ 31;
+#     else
+        unsigned long r = 0;
+        return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+#     endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
         return __builtin_clz (val) ^ 31;
 #   elif defined(__ICCARM__)    /* IAR Intrinsic */
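Note: the new STATIC_BMI2 branch above relies on the identity that, for nonzero val, _lzcnt_u32(val) ^ 31 equals the index of the highest set bit (lzcnt returns 31 minus that index, and 31 - x == 31 ^ x for x in 0..31). A minimal sketch checking the identity with a portable stand-in for the intrinsic (hypothetical test harness, not part of the gem):

    #include <assert.h>
    #include <stdint.h>

    /* Portable reference: index of the highest set bit of a nonzero value. */
    static unsigned highbit32_ref(uint32_t val)
    {
        unsigned r = 0;
        while (val >>= 1) r++;
        return r;
    }

    /* Portable stand-in for _lzcnt_u32: count leading zero bits. */
    static unsigned lzcnt32(uint32_t val)
    {
        unsigned lz = 0;
        uint32_t m = 0x80000000u;
        while (m && !(val & m)) { lz++; m >>= 1; }
        return lz;
    }

    int main(void)
    {
        uint32_t v;
        for (v = 1; v < (1u << 20); v++)
            assert((lzcnt32(v) ^ 31) == highbit32_ref(v));
        return 0;
    }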
@@ -198,7 +203,7 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
 MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
                             size_t value, unsigned nbBits)
 {
-    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
     assert(nbBits < BIT_MASK_SIZE);
     assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
     bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
@@ -271,7 +276,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
  */
 MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
 {
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
 
     bitD->start = (const char*)srcBuffer;
     bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
@@ -317,12 +322,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
     return srcSize;
 }
 
-MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
 {
     return bitContainer >> start;
 }
 
-MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
 {
     U32 const regMask = sizeof(bitContainer)*8 - 1;
     /* if start > regMask, bitstream is corrupted, and result is undefined */
@@ -330,10 +335,14 @@ MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 co
     return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
 }
 
-MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
 {
+#if defined(STATIC_BMI2) && STATIC_BMI2 == 1
+    return _bzhi_u64(bitContainer, nbBits);
+#else
     assert(nbBits < BIT_MASK_SIZE);
     return bitContainer & BIT_mask[nbBits];
+#endif
 }
 
 /*! BIT_lookBits() :
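Note: _bzhi_u64(x, n) (BMI2) zeroes all bits of x at positions n and above, so for n < 64 it matches the BIT_mask table lookup that BIT_getLowerBits otherwise performs, without the memory load. A small equivalence sketch with a portable stand-in for the intrinsic (hypothetical, not part of the gem):

    #include <assert.h>
    #include <stdint.h>

    /* Portable stand-in for _bzhi_u64: clear bits [n, 64). */
    static uint64_t bzhi64(uint64_t x, unsigned n)
    {
        return (n >= 64) ? x : (x & ((1ULL << n) - 1));
    }

    int main(void)
    {
        uint64_t mask[64];             /* BIT_mask-style lookup table */
        uint64_t const x = 0x0123456789ABCDEFULL;
        unsigned n;
        for (n = 0; n < 64; n++) mask[n] = (1ULL << n) - 1;
        for (n = 0; n < 64; n++)
            assert(bzhi64(x, n) == (x & mask[n]));  /* table and bzhi agree */
        return 0;
    }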
@@ -342,7 +351,7 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
  *  On 32-bits, maxNbBits==24.
  *  On 64-bits, maxNbBits==56.
  * @return : value extracted */
-MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
 {
     /* arbitrate between double-shift and shift+mask */
 #if 1
@@ -365,7 +374,7 @@ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
     return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
 }
 
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
 {
     bitD->bitsConsumed += nbBits;
 }
@@ -374,7 +383,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
  *  Read (consume) next n bits from local register and update.
  *  Pay attention to not read more than nbBits contained into local register.
  * @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
 {
     size_t const value = BIT_lookBits(bitD, nbBits);
     BIT_skipBits(bitD, nbBits);
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/compiler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -38,6 +38,17 @@
 
 #endif
 
+/**
+  On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
+  This explictly marks such functions as __cdecl so that the code will still compile
+  if a CC other than __cdecl has been made the default.
+*/
+#if defined(_MSC_VER)
+#  define WIN_CDECL __cdecl
+#else
+#  define WIN_CDECL
+#endif
+
 /**
  * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
  * parameters. They must be inlined for the compiler to eliminate the constant
@@ -79,6 +90,7 @@
 #  endif
 #endif
 
+
 /* target attribute */
 #ifndef __has_attribute
   #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
@@ -114,12 +126,12 @@
 #  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
 #  define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
 #  define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
-#elif defined(__aarch64__)
-#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
-#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
 #elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
 #  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
 #  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#elif defined(__aarch64__)
+#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
 #else
 #  define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
 #  define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
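Note: the reorder above moves the generic __builtin_prefetch branch ahead of the aarch64 inline-asm branch, so GCC and Clang now take the builtin path on aarch64 as well. A small usage sketch of the builtin (illustrative strided loop, not code from the gem):

    /* Prefetch a strided read pattern a few iterations ahead. */
    static long sum_strided(const int* a, unsigned n, unsigned stride)
    {
        long s = 0;
        unsigned i;
        for (i = 0; i < n; i += stride) {
    #if defined(__GNUC__)
            if (i + 8*stride < n)
                __builtin_prefetch(&a[i + 8*stride], 0 /* read */, 3 /* high locality */);
    #endif
            s += a[i];
        }
        return s;
    }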
@@ -172,4 +184,106 @@
 #  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
 #endif
 
+/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
+#ifndef STATIC_BMI2
+#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
+#    ifdef __AVX2__  //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2
+#      define STATIC_BMI2 1
+#    endif
+#  endif
+#endif
+
+#ifndef STATIC_BMI2
+    #define STATIC_BMI2 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+#  define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+#  define __has_feature(x) 0
+#endif
+
+/* detects whether we are being compiled under msan */
+#ifndef ZSTD_MEMORY_SANITIZER
+#  if __has_feature(memory_sanitizer)
+#    define ZSTD_MEMORY_SANITIZER 1
+#  else
+#    define ZSTD_MEMORY_SANITIZER 0
+#  endif
+#endif
+
+#if ZSTD_MEMORY_SANITIZER
+/* Not all platforms that support msan provide sanitizers/msan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+#include <stddef.h>  /* size_t */
+#define ZSTD_DEPS_NEED_STDINT
+#include "zstd_deps.h"  /* intptr_t */
+
+/* Make memory region fully initialized (without changing its contents). */
+void __msan_unpoison(const volatile void *a, size_t size);
+
+/* Make memory region fully uninitialized (without changing its contents).
+   This is a legacy interface that does not update origin information. Use
+   __msan_allocated_memory() instead. */
+void __msan_poison(const volatile void *a, size_t size);
+
+/* Returns the offset of the first (at least partially) poisoned byte in the
+   memory range, or -1 if the whole range is good. */
+intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+#endif
+
+/* detects whether we are being compiled under asan */
+#ifndef ZSTD_ADDRESS_SANITIZER
+#  if __has_feature(address_sanitizer)
+#    define ZSTD_ADDRESS_SANITIZER 1
+#  elif defined(__SANITIZE_ADDRESS__)
+#    define ZSTD_ADDRESS_SANITIZER 1
+#  else
+#    define ZSTD_ADDRESS_SANITIZER 0
+#  endif
+#endif
+
+#if ZSTD_ADDRESS_SANITIZER
+/* Not all platforms that support asan provide sanitizers/asan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+#include <stddef.h>  /* size_t */
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
+ *
+ * This memory must be previously allocated by your program. Instrumented
+ * code is forbidden from accessing addresses in this region until it is
+ * unpoisoned. This function is not guaranteed to poison the entire region -
+ * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
+ * alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can poison or
+ * unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_poison_memory_region(void const volatile *addr, size_t size);
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
+ *
+ * This memory must be previously allocated by your program. Accessing
+ * addresses in this region is allowed until this region is poisoned again.
+ * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
+ * to ASan alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can
+ * poison or unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+#endif
+
 #endif  /* ZSTD_COMPILER_H */
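Note: these sanitizer declarations are what allow zstd 1.5.0 to poison unused workspace regions (zstd_cwksp.h is the main consumer), so instrumented builds flag stray reads into scratch space. A hedged sketch of the general ASan pattern, reusing the two-step __has_feature guard from the diff; the buffer and sizes here are purely illustrative:

    #include <stddef.h>
    #include <stdlib.h>

    #ifndef __has_feature
    #  define __has_feature(x) 0
    #endif

    #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
    void __asan_poison_memory_region(void const volatile *addr, size_t size);
    void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
    #  define POISON(p, s)   __asan_poison_memory_region((p), (s))
    #  define UNPOISON(p, s) __asan_unpoison_memory_region((p), (s))
    #else
    #  define POISON(p, s)   ((void)(p), (void)(s))   /* no-op without ASan */
    #  define UNPOISON(p, s) ((void)(p), (void)(s))
    #endif

    int main(void)
    {
        char* wksp = (char*)malloc(1024);
        if (!wksp) return 1;
        POISON(wksp, 1024);     /* instrumented access would now be reported */
        UNPOISON(wksp, 128);    /* hand out the first 128 bytes */
        wksp[0] = 1;            /* fine: unpoisoned */
        UNPOISON(wksp, 1024);   /* unpoison before returning memory to the allocator */
        free(wksp);
        return 0;
    }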
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/cpu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Facebook, Inc.
+ * Copyright (c) Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,8 +16,6 @@
  * https://github.com/facebook/folly/blob/master/folly/CpuId.h
  */
 
-#include <string.h>
-
 #include "mem.h"
 
 #ifdef _MSC_VER
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.c
@@ -1,7 +1,7 @@
 /* ******************************************************************
  * debug
  * Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  *
  * You can contact the author at :
  * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.h
@@ -1,7 +1,7 @@
 /* ******************************************************************
  * debug
  * Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  *
  * You can contact the author at :
  * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -51,15 +51,6 @@ extern "C" {
 #endif
 
 
-/* DEBUGFILE can be defined externally,
- * typically through compiler command line.
- * note : currently useless.
- * Value must be stderr or stdout */
-#ifndef DEBUGFILE
-#  define DEBUGFILE stderr
-#endif
-
-
 /* recommended values for DEBUGLEVEL :
  * 0 : release mode, no debug, all run-time checks disabled
  * 1 : enables assert() only, no display
@@ -76,7 +67,8 @@ extern "C" {
  */
 
 #if (DEBUGLEVEL>=1)
-#  include <assert.h>
+#  define ZSTD_DEPS_NEED_ASSERT
+#  include "zstd_deps.h"
 #else
 #  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
 #    define assert(condition) ((void)0)   /* disable assert (default) */
@@ -84,7 +76,8 @@ extern "C" {
 #endif
 
 #if (DEBUGLEVEL>=2)
-#  include <stdio.h>
+#  define ZSTD_DEPS_NEED_IO
+#  include "zstd_deps.h"
 extern int g_debuglevel; /* the variable is only declared,
                             it actually lives in debug.c,
                             and is shared by the whole process.
@@ -92,14 +85,14 @@ extern int g_debuglevel; /* the variable is only declared,
                             It's useful when enabling very verbose levels
                             on selective conditions (such as position in src) */
 
-#  define RAWLOG(l, ...) {                                       \
-                if (l<=g_debuglevel) {                           \
-                    fprintf(stderr, __VA_ARGS__);                \
+#  define RAWLOG(l, ...) {                                      \
+                if (l<=g_debuglevel) {                          \
+                    ZSTD_DEBUG_PRINT(__VA_ARGS__);              \
             }   }
-#  define DEBUGLOG(l, ...) {                                     \
-                if (l<=g_debuglevel) {                           \
-                    fprintf(stderr, __FILE__ ": " __VA_ARGS__);  \
-                    fprintf(stderr, " \n");                      \
+#  define DEBUGLOG(l, ...) {                                    \
+                if (l<=g_debuglevel) {                          \
+                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+                    ZSTD_DEBUG_PRINT(" \n");                     \
             }   }
 #else
 #  define RAWLOG(l, ...)      {}    /* disabled */
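Note: routing RAWLOG and DEBUGLOG through ZSTD_DEBUG_PRINT instead of fprintf(stderr, ...) lets freestanding builds supply their own logging hook via the new zstd_deps.h. A minimal sketch of the idea; the fprintf fallback shown here is an assumption about the default in zstd_deps.h:

    #include <stdio.h>

    /* User-overridable print hook; assumed default routes to stderr. */
    #ifndef ZSTD_DEBUG_PRINT
    #  define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)
    #endif

    static int g_debuglevel = 2;

    #define DEBUGLOG(l, ...) {                                   \
                if (l<=g_debuglevel) {                           \
                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
                    ZSTD_DEBUG_PRINT(" \n");                     \
            }   }

    int main(void)
    {
        DEBUGLOG(2, "table log = %d", 11);  /* goes wherever ZSTD_DEBUG_PRINT points */
        return 0;
    }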
data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c (new file)
@@ -0,0 +1,362 @@
+/* ******************************************************************
+ * Common functions of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ *  You can contact the author at :
+ *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* *************************************
+*  Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h"       /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*===   Version   ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*===   Error Management   ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static U32 FSE_ctz(U32 val)
+{
+    assert(val != 0);
+    {
+#   if defined(_MSC_VER)   /* Visual */
+        unsigned long r=0;
+        return _BitScanForward(&r, val) ? (unsigned)r : 0;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
+        return __builtin_ctz(val);
+#   elif defined(__ICCARM__)    /* IAR Intrinsic */
+        return __CTZ(val);
+#   else   /* Software version */
+        U32 count = 0;
+        while ((val & 1) == 0) {
+            val >>= 1;
+            ++count;
+        }
+        return count;
+#   endif
+    }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                           const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    unsigned const maxSV1 = *maxSVPtr + 1;
+    int previous0 = 0;
+
+    if (hbSize < 8) {
+        /* This function only works when hbSize >= 8 */
+        char buffer[8] = {0};
+        ZSTD_memcpy(buffer, headerBuffer, hbSize);
+        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+                                                    buffer, sizeof(buffer));
+            if (FSE_isError(countSize)) return countSize;
+            if (countSize > hbSize) return ERROR(corruption_detected);
+            return countSize;
+    }   }
+    assert(hbSize >= 8);
+
+    /* init */
+    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    for (;;) {
+        if (previous0) {
+            /* Count the number of repeats. Each time the
+             * 2-bit repeat code is 0b11 there is another
+             * repeat.
+             * Avoid UB by setting the high bit to 1.
+             */
+            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+            while (repeats >= 12) {
+                charnum += 3 * 12;
+                if (LIKELY(ip <= iend-7)) {
+                    ip += 3;
+                } else {
+                    bitCount -= (int)(8 * (iend - 7 - ip));
+                    bitCount &= 31;
+                    ip = iend - 4;
+                }
+                bitStream = MEM_readLE32(ip) >> bitCount;
+                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+            }
+            charnum += 3 * repeats;
+            bitStream >>= 2 * repeats;
+            bitCount += 2 * repeats;
+
+            /* Add the final repeat which isn't 0b11. */
+            assert((bitStream & 3) < 3);
+            charnum += bitStream & 3;
+            bitCount += 2;
+
+            /* This is an error, but break and return an error
+             * at the end, because returning out of a loop makes
+             * it harder for the compiler to optimize.
+             */
+            if (charnum >= maxSV1) break;
+
+            /* We don't need to set the normalized count to 0
+             * because we already memset the whole buffer to 0.
+             */
+
+            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                assert((bitCount >> 3) <= 3); /* For first condition to work */
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                bitCount &= 31;
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> bitCount;
+        }
+        {
+            int const max = (2*threshold-1) - remaining;
+            int count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = bitStream & (threshold-1);
+                bitCount += nbBits-1;
+            } else {
+                count = bitStream & (2*threshold-1);
+                if (count >= threshold) count -= max;
+                bitCount += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            /* When it matters (small blocks), this is a
+             * predictable branch, because we don't use -1.
+             */
+            if (count >= 0) {
+                remaining -= count;
+            } else {
+                assert(count == -1);
+                remaining += count;
+            }
+            normalizedCounter[charnum++] = (short)count;
+            previous0 = !count;
+
+            assert(threshold > 1);
+            if (remaining < threshold) {
+                /* This branch can be folded into the
+                 * threshold update condition because we
+                 * know that threshold > 1.
+                 */
+                if (remaining <= 1) break;
+                nbBits = BIT_highbit32(remaining) + 1;
+                threshold = 1 << (nbBits - 1);
+            }
+            if (charnum >= maxSV1) break;
+
+            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                bitCount &= 31;
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> bitCount;
+    }   }
+    if (remaining != 1) return ERROR(corruption_detected);
+    /* Only possible when there are too many zeros. */
+    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
+    if (bitCount > 32) return ERROR(corruption_detected);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    return ip-istart;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_readNCount_body_default(
+        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+        const void* headerBuffer, size_t hbSize)
+{
+    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
+        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+        const void* headerBuffer, size_t hbSize)
+{
+    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+#endif
+
+size_t FSE_readNCount_bmi2(
+        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+        const void* headerBuffer, size_t hbSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+    }
+#endif
+    (void)bmi2;
+    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+size_t FSE_readNCount(
+        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+        const void* headerBuffer, size_t hbSize)
+{
+    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
+}
+
+
+/*! HUF_readStats() :
+    Read compact Huffman tree, saved by HUF_writeCTable().
+    `huffWeight` is destination buffer.
+    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src` , or an error Code .
+    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize)
+{
+    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                   U32* nbSymbolsPtr, U32* tableLogPtr,
+                   const void* src, size_t srcSize,
+                   void* workSpace, size_t wkspSize,
+                   int bmi2)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzer complain ... */
+
+    if (iSize >= 128) {  /* special header */
+        oSize = iSize - 127;
+        iSize = ((oSize+1)/2);
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        if (oSize >= hwSize) return ERROR(corruption_detected);
+        ip += 1;
+        {   U32 n;
+            for (n=0; n<oSize; n+=2) {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        /* max (hwSize-1) values decoded, as last one is implied */
+        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight */
+        {   U32 const total = 1 << tableLog;
+            U32 const rest = total - weightTotal;
+            U32 const verif = 1 << BIT_highbit32(rest);
+            U32 const lastWeight = BIT_highbit32(rest) + 1;
+            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+            huffWeight[oSize] = (BYTE)lastWeight;
+            rankStats[lastWeight]++;
+    }   }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    return iSize+1;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize,
+                     void* workSpace, size_t wkspSize)
+{
+    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize,
+                     void* workSpace, size_t wkspSize)
+{
+    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize,
+                     void* workSpace, size_t wkspSize,
+                     int bmi2)
+{
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+    }
+#endif
+    (void)bmi2;
+    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+}