zstdlib 0.12.0 → 0.13.0

Files changed (76)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +7 -0
  3. data/Rakefile +1 -1
  4. data/ext/zstdlib_c/extconf.rb +1 -1
  5. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/allocations.h +1 -1
  6. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/bitstream.h +49 -29
  7. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/compiler.h +114 -22
  8. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/cpu.h +36 -0
  9. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/debug.c +6 -0
  10. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/debug.h +20 -11
  11. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/error_private.h +45 -36
  12. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/fse.h +3 -2
  13. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/fse_decompress.c +19 -17
  14. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/huf.h +14 -1
  15. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/mem.h +0 -9
  16. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/pool.c +1 -1
  17. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/pool.h +1 -1
  18. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/portability_macros.h +2 -0
  19. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/threading.c +8 -2
  20. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/xxhash.c +5 -11
  21. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/xxhash.h +2341 -1007
  22. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/zstd_internal.h +5 -5
  23. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/fse_compress.c +8 -7
  24. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/huf_compress.c +54 -25
  25. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress.c +282 -161
  26. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_internal.h +29 -27
  27. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_superblock.c +224 -113
  28. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_cwksp.h +19 -13
  29. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_double_fast.c +17 -5
  30. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_double_fast.h +11 -0
  31. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_fast.c +14 -6
  32. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_lazy.c +129 -87
  33. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_lazy.h +103 -28
  34. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_ldm.c +8 -2
  35. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_opt.c +216 -112
  36. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_opt.h +31 -7
  37. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstdmt_compress.c +94 -79
  38. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/huf_decompress.c +188 -126
  39. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/huf_decompress_amd64.S +38 -19
  40. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_decompress.c +84 -32
  41. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_decompress_block.c +231 -208
  42. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_decompress_block.h +1 -1
  43. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_decompress_internal.h +2 -0
  44. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/zstd.h +129 -60
  45. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzclose.c +1 -3
  46. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzlib.c +20 -73
  47. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzread.c +17 -58
  48. data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzwrite.c +18 -58
  49. metadata +76 -76
  50. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/bits.h +0 -0
  51. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/entropy_common.c +0 -0
  52. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/error_private.c +0 -0
  53. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/threading.h +0 -0
  54. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/zstd_common.c +0 -0
  55. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/zstd_deps.h +0 -0
  56. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/common/zstd_trace.h +0 -0
  57. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/clevels.h +0 -0
  58. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/hist.c +0 -0
  59. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/hist.h +0 -0
  60. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_literals.c +0 -0
  61. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_literals.h +0 -0
  62. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_sequences.c +0 -0
  63. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_sequences.h +0 -0
  64. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_compress_superblock.h +0 -0
  65. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_fast.h +0 -0
  66. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_ldm.h +0 -0
  67. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstd_ldm_geartab.h +0 -0
  68. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/compress/zstdmt_compress.h +0 -0
  69. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_ddict.c +0 -0
  70. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/decompress/zstd_ddict.h +0 -0
  71. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/zdict.h +0 -0
  72. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/lib/zstd_errors.h +0 -0
  73. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzcompatibility.h +0 -0
  74. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/gzguts.h +0 -0
  75. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/zstd_zlibwrapper.c +0 -0
  76. /data/ext/zstdlib_c/{zstd-1.5.5 → zstd-1.5.6}/zlibWrapper/zstd_zlibwrapper.h +0 -0

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/fse.h
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse.h
@@ -229,6 +229,7 @@ If there is an error, the function will return an error code, which can be teste
 
 #endif  /* FSE_H */
 
+
 #if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
 #define FSE_H_FSE_STATIC_LINKING_ONLY
 
@@ -464,13 +465,13 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un
     FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
     const U16* const stateTable = (const U16*)(statePtr->stateTable);
     U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
-    BIT_addBits(bitC, statePtr->value, nbBitsOut);
+    BIT_addBits(bitC, (size_t)statePtr->value, nbBitsOut);
     statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
 }
 
 MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
 {
-    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+    BIT_addBits(bitC, (size_t)statePtr->value, statePtr->stateLog);
     BIT_flushBits(bitC);
 }
 

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/fse_decompress.c
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse_decompress.c
@@ -22,8 +22,7 @@
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
 #include "error_private.h"
-#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"
+#include "zstd_deps.h"  /* ZSTD_memcpy */
 #include "bits.h"       /* ZSTD_highbit32 */
 
 
@@ -84,7 +83,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
                 symbolNext[s] = 1;
             } else {
                 if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                symbolNext[s] = normalizedCounter[s];
+                symbolNext[s] = (U16)normalizedCounter[s];
     }   }   }
     ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
 }
@@ -99,8 +98,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
          * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
          * our buffer to handle the over-write.
          */
-        {
-            U64 const add = 0x0101010101010101ull;
+        {   U64 const add = 0x0101010101010101ull;
             size_t pos = 0;
             U64 sv = 0;
             U32 s;
@@ -111,9 +109,8 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
             for (i = 8; i < n; i += 8) {
                 MEM_write64(spread + pos + i, sv);
             }
-            pos += n;
-        }
-    }
+            pos += (size_t)n;
+    }   }
     /* Now we spread those positions across the table.
      * The benefit of doing it in two stages is that we avoid the
      * variable size inner loop, which caused lots of branch misses.
@@ -232,12 +229,12 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
             break;
     }   }
 
-    return op-ostart;
+    assert(op >= ostart);
+    return (size_t)(op-ostart);
 }
 
 typedef struct {
     short ncount[FSE_MAX_SYMBOL_VALUE + 1];
-    FSE_DTable dtable[1]; /* Dynamically sized */
 } FSE_DecompressWksp;
 
 
@@ -252,13 +249,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
     unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+    size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
+    FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;
 
-    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+    FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
     if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
 
+    /* correct offset to dtable depends on this property */
+    FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);
+
     /* normal FSE decoding mode */
-    {
-        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+    {   size_t const NCountLength =
+            FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
        if (FSE_isError(NCountLength)) return NCountLength;
        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
        assert(NCountLength <= cSrcSize);
@@ -271,16 +273,16 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
         workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
         wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
 
-        CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+        CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
 
         {
-            const void* ptr = wksp->dtable;
+            const void* ptr = dtable;
             const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
             const U32 fastMode = DTableH->fastMode;
 
             /* select fast mode (static) */
-            if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
-            return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+            if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
+            return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
         }
     }
 
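
The FSE_DecompressWksp change above removes the dynamically sized dtable[1] member and instead derives the table pointer from the workspace base, which is why the new FSE_STATIC_ASSERT on sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) appears. A minimal standalone sketch of that layout arithmetic, using made-up stand-in types (Wksp_example, DTable_example) rather than the real zstd definitions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned DTable_example;                      /* stand-in for an FSE_DTable cell */
    typedef struct { short ncount[256]; } Wksp_example;   /* stand-in for FSE_DecompressWksp */

    /* The table now starts right after the workspace header. Expressing the offset in
     * DTable cells is only valid if sizeof(header) is a multiple of sizeof(cell),
     * which is what the added static assert guarantees in the real code. */
    static DTable_example* dtable_from_workspace(void* workSpace)
    {
        size_t const dtablePos = sizeof(Wksp_example) / sizeof(DTable_example);
        assert(sizeof(Wksp_example) % sizeof(DTable_example) == 0);
        return (DTable_example*)workSpace + dtablePos;
    }

    int main(void)
    {
        unsigned char workspace[sizeof(Wksp_example) + 64 * sizeof(DTable_example)];
        DTable_example* const dtable = dtable_from_workspace(workspace);
        printf("header = %zu bytes, dtable starts at byte %td\n",
               sizeof(Wksp_example), (char*)dtable - (char*)workspace);
        return 0;
    }
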

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/huf.h
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/huf.h
@@ -197,9 +197,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
 
 /** HUF_getNbBitsFromCTable() :
  *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
- *  Note 1 : is not inlined, as HUF_CElt definition is private */
+ *  Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
+ *  Note 2 : is not inlined, as HUF_CElt definition is private
+ */
 U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
 
+typedef struct {
+    BYTE tableLog;
+    BYTE maxSymbolValue;
+    BYTE unused[sizeof(size_t) - 2];
+} HUF_CTableHeader;
+
+/** HUF_readCTableHeader() :
+ *  @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
+ */
+HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable);
+
 /*
  *  HUF_decompress() does the following:
  *  1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
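
HUF_readCTableHeader() is the new accessor added by this hunk, and the rewritten Note 1 documents that HUF_getNbBitsFromCTable() returns 0 for symbols beyond the table's maxSymbolValue. A hedged usage sketch, assuming it is compiled inside the zstd source tree against these headers and linked with the Huffman compression unit that defines both functions:

    #include "huf.h"   /* HUF_CElt, HUF_CTableHeader, HUF_readCTableHeader, HUF_getNbBitsFromCTable */
    #include "mem.h"   /* U32, BYTE */

    /* Report the code length a previously built CTable assigns to `symbol`.
     * The explicit bound check only mirrors the documented behaviour above;
     * HUF_getNbBitsFromCTable() already returns 0 past maxSymbolValue. */
    static U32 ctable_nb_bits(const HUF_CElt* ctable, U32 symbol)
    {
        HUF_CTableHeader const header = HUF_readCTableHeader(ctable);
        if (symbol > header.maxSymbolValue) return 0;
        return HUF_getNbBitsFromCTable(ctable, symbol);
    }
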

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/mem.h
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/mem.h
@@ -31,15 +31,6 @@ extern "C" {
 #  include <stdlib.h>  /* _byteswap_ulong */
 #  include <intrin.h>  /* _byteswap_* */
 #endif
-#if defined(__GNUC__)
-#  define MEM_STATIC static __inline __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
 
 /*-**************************************************************
 *  Basic Types
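
The MEM_STATIC fallback ladder deleted above is not gone from zstd; it appears to have been consolidated elsewhere (compiler.h gains 114 lines in this same release). A purely illustrative sketch of an equivalent single definition; every name ending in _SKETCH is made up for this example and is not a zstd identifier:

    /* Pick an inline keyword and an "unused" attribute per compiler, then
     * compose them once instead of spelling out MEM_STATIC per branch. */
    #if defined(__GNUC__) || defined(__clang__)
    #  define INLINE_KEYWORD_SKETCH __inline
    #  define UNUSED_ATTR_SKETCH    __attribute__((unused))
    #elif defined(_MSC_VER)
    #  define INLINE_KEYWORD_SKETCH __inline
    #  define UNUSED_ATTR_SKETCH
    #else
    #  define INLINE_KEYWORD_SKETCH
    #  define UNUSED_ATTR_SKETCH
    #endif
    #define MEM_STATIC_SKETCH static INLINE_KEYWORD_SKETCH UNUSED_ATTR_SKETCH

    /* Example consumer, in the style of the small helpers mem.h declares. */
    MEM_STATIC_SKETCH unsigned MEM_32bits_sketch(void) { return sizeof(size_t) == 4; }
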

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/pool.c
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.c
@@ -223,7 +223,7 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
     {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
         if (!threadPool) return 1;
         /* replace existing thread pool */
-        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(ZSTD_pthread_t));
         ZSTD_customFree(ctx->threads, ctx->customMem);
         ctx->threads = threadPool;
         /* Initialize additional threads */

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/pool.h
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.h
@@ -47,7 +47,7 @@ void POOL_joinJobs(POOL_ctx* ctx);
 /*! POOL_resize() :
  *  Expands or shrinks pool's number of threads.
  *  This is more efficient than releasing + creating a new context,
- *  since it tries to preserve and re-use existing threads.
+ *  since it tries to preserve and reuse existing threads.
  * `numThreads` must be at least 1.
  * @return : 0 when resize was successful,
  *         !0 (typically 1) if there is an error.
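
The POOL_resize() contract described in this comment (grow or shrink in place, reuse threads, 0 on success) can be exercised directly. A hedged sketch, assuming it is built inside the zstd tree with ZSTD_MULTITHREAD so the pool is a real thread pool rather than the single-threaded fallback:

    #include "pool.h"   /* POOL_create, POOL_resize, POOL_free */
    #include <stdio.h>

    int main(void)
    {
        POOL_ctx* const ctx = POOL_create(2 /* numThreads */, 8 /* queueSize */);
        if (ctx == NULL) return 1;

        /* Grow then shrink; existing threads are preserved and reused where possible. */
        if (POOL_resize(ctx, 4) != 0) { POOL_free(ctx); return 1; }
        if (POOL_resize(ctx, 1) != 0) { POOL_free(ctx); return 1; }

        POOL_free(ctx);
        printf("pool resized without error\n");
        return 0;
    }
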

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/portability_macros.h
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/portability_macros.h
@@ -68,6 +68,8 @@
 /* Mark the internal assembly functions as hidden */
 #ifdef __ELF__
 # define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#elif defined(__APPLE__)
+# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func
 #else
 # define ZSTD_HIDE_ASM_FUNCTION(func)
 #endif

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/threading.c
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/threading.c
@@ -73,10 +73,12 @@ int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
     ZSTD_thread_params_t thread_param;
     (void)unused;
 
+    if (thread==NULL) return -1;
+    *thread = NULL;
+
     thread_param.start_routine = start_routine;
     thread_param.arg = arg;
     thread_param.initialized = 0;
-    *thread = NULL;
 
     /* Setup thread initialization synchronization */
     if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
@@ -91,7 +93,7 @@ int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
 
     /* Spawn thread */
     *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
-    if (!thread) {
+    if (*thread==NULL) {
         ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
         ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
         return errno;
@@ -137,6 +139,7 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread)
 
 int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
 {
+    assert(mutex != NULL);
     *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
     if (!*mutex)
         return 1;
@@ -145,6 +148,7 @@ int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t con
 
 int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
 {
+    assert(mutex != NULL);
     if (!*mutex)
         return 0;
     {
@@ -156,6 +160,7 @@ int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
 
 int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
 {
+    assert(cond != NULL);
     *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
     if (!*cond)
         return 1;
@@ -164,6 +169,7 @@ int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const*
 
 int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
 {
+    assert(cond != NULL);
     if (!*cond)
         return 0;
     {

--- a/data/ext/zstdlib_c/zstd-1.5.5/lib/common/xxhash.c
+++ b/data/ext/zstdlib_c/zstd-1.5.6/lib/common/xxhash.c
@@ -1,24 +1,18 @@
 /*
- * xxHash - Fast Hash algorithm
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * You can contact the author at :
- * - xxHash homepage: https://cyan4973.github.io/xxHash/
- * - xxHash source repository : https://github.com/Cyan4973/xxHash
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (c) Yann Collet - Meta Platforms, Inc
  *
  * This source code is licensed under both the BSD-style license (found in the
  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  * in the COPYING file in the root directory of this source tree).
  * You may select, at your option, one of the above-listed licenses.
- */
-
-
+*/
 
 /*
  * xxhash.c instantiates functions defined in xxhash.h
 */
 
-#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */
-#define XXH_IMPLEMENTATION      /* access definitions */
+#define XXH_STATIC_LINKING_ONLY   /* access advanced declarations */
+#define XXH_IMPLEMENTATION        /* access definitions */
 
 #include "xxhash.h"
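
The two defines kept at the bottom of this file are xxHash's single-header instantiation pattern: XXH_STATIC_LINKING_ONLY exposes the advanced declarations and XXH_IMPLEMENTATION compiles the definitions into this translation unit. A small sketch of the same pattern from a user's side, using the public one-shot XXH64() API (any other xxHash entry point would do):

    /* Instantiate xxHash in this translation unit, exactly as xxhash.c does. */
    #define XXH_STATIC_LINKING_ONLY   /* access advanced declarations */
    #define XXH_IMPLEMENTATION        /* access definitions */
    #include "xxhash.h"

    #include <stdio.h>

    int main(void)
    {
        const char data[] = "zstdlib";
        unsigned long long const h = (unsigned long long)XXH64(data, sizeof(data) - 1, /* seed */ 0);
        printf("XXH64 = %016llx\n", h);
        return 0;
    }
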