extlz4 0.2.5 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. checksums.yaml +4 -4
  2. data/HISTORY.ja.md +16 -1
  3. data/README.md +49 -51
  4. data/Rakefile +22 -0
  5. data/bin/extlz4 +1 -1
  6. data/contrib/lz4/LICENSE +2 -1
  7. data/contrib/lz4/Makefile.inc +111 -0
  8. data/contrib/lz4/NEWS +97 -0
  9. data/contrib/lz4/README.md +41 -36
  10. data/contrib/lz4/build/README.md +55 -0
  11. data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +169 -0
  12. data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +176 -0
  13. data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +176 -0
  14. data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +180 -0
  15. data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +173 -0
  16. data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +175 -0
  17. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +51 -0
  18. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +179 -0
  19. data/contrib/lz4/build/VS2010/lz4/lz4.rc +51 -0
  20. data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +189 -0
  21. data/contrib/lz4/build/VS2010/lz4.sln +98 -0
  22. data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +173 -0
  23. data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +180 -0
  24. data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +180 -0
  25. data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +184 -0
  26. data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +177 -0
  27. data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +179 -0
  28. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +51 -0
  29. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +183 -0
  30. data/contrib/lz4/build/VS2017/lz4/lz4.rc +51 -0
  31. data/contrib/lz4/build/VS2017/lz4/lz4.vcxproj +175 -0
  32. data/contrib/lz4/build/VS2017/lz4.sln +103 -0
  33. data/contrib/lz4/build/VS2022/datagen/datagen.vcxproj +173 -0
  34. data/contrib/lz4/build/VS2022/frametest/frametest.vcxproj +180 -0
  35. data/contrib/lz4/build/VS2022/fullbench/fullbench.vcxproj +180 -0
  36. data/contrib/lz4/build/VS2022/fullbench-dll/fullbench-dll.vcxproj +184 -0
  37. data/contrib/lz4/build/VS2022/fuzzer/fuzzer.vcxproj +177 -0
  38. data/contrib/lz4/build/VS2022/liblz4/liblz4.vcxproj +179 -0
  39. data/contrib/lz4/build/VS2022/liblz4-dll/liblz4-dll.rc +51 -0
  40. data/contrib/lz4/build/VS2022/liblz4-dll/liblz4-dll.vcxproj +183 -0
  41. data/contrib/lz4/build/VS2022/lz4.sln +103 -0
  42. data/contrib/lz4/build/cmake/CMakeLists.txt +273 -0
  43. data/contrib/lz4/build/cmake/lz4Config.cmake.in +2 -0
  44. data/contrib/lz4/lib/LICENSE +1 -1
  45. data/contrib/lz4/lib/README.md +111 -15
  46. data/contrib/lz4/lib/liblz4-dll.rc.in +35 -0
  47. data/contrib/lz4/lib/liblz4.pc.in +3 -3
  48. data/contrib/lz4/lib/lz4.c +1891 -733
  49. data/contrib/lz4/lib/lz4.h +597 -234
  50. data/contrib/lz4/lib/lz4file.c +311 -0
  51. data/contrib/lz4/lib/lz4file.h +93 -0
  52. data/contrib/lz4/lib/lz4frame.c +896 -493
  53. data/contrib/lz4/lib/lz4frame.h +408 -107
  54. data/contrib/lz4/lib/lz4frame_static.h +5 -112
  55. data/contrib/lz4/lib/lz4hc.c +1039 -301
  56. data/contrib/lz4/lib/lz4hc.h +264 -123
  57. data/contrib/lz4/lib/xxhash.c +376 -240
  58. data/contrib/lz4/lib/xxhash.h +128 -93
  59. data/contrib/lz4/ossfuzz/Makefile +79 -0
  60. data/contrib/lz4/ossfuzz/compress_frame_fuzzer.c +48 -0
  61. data/contrib/lz4/ossfuzz/compress_fuzzer.c +58 -0
  62. data/contrib/lz4/ossfuzz/compress_hc_fuzzer.c +64 -0
  63. data/contrib/lz4/ossfuzz/decompress_frame_fuzzer.c +75 -0
  64. data/contrib/lz4/ossfuzz/decompress_fuzzer.c +78 -0
  65. data/contrib/lz4/ossfuzz/fuzz.h +48 -0
  66. data/contrib/lz4/ossfuzz/fuzz_data_producer.c +77 -0
  67. data/contrib/lz4/ossfuzz/fuzz_data_producer.h +36 -0
  68. data/contrib/lz4/ossfuzz/fuzz_helpers.h +95 -0
  69. data/contrib/lz4/ossfuzz/lz4_helpers.c +51 -0
  70. data/contrib/lz4/ossfuzz/lz4_helpers.h +13 -0
  71. data/contrib/lz4/ossfuzz/ossfuzz.sh +23 -0
  72. data/contrib/lz4/ossfuzz/round_trip_frame_fuzzer.c +43 -0
  73. data/contrib/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c +134 -0
  74. data/contrib/lz4/ossfuzz/round_trip_fuzzer.c +117 -0
  75. data/contrib/lz4/ossfuzz/round_trip_hc_fuzzer.c +44 -0
  76. data/contrib/lz4/ossfuzz/round_trip_stream_fuzzer.c +302 -0
  77. data/contrib/lz4/ossfuzz/standaloneengine.c +74 -0
  78. data/contrib/lz4/ossfuzz/travisoss.sh +26 -0
  79. data/ext/blockapi.c +13 -48
  80. data/ext/extlz4.c +2 -0
  81. data/ext/extlz4.h +17 -0
  82. data/ext/frameapi.c +3 -14
  83. data/ext/hashargs.c +9 -3
  84. data/ext/hashargs.h +1 -1
  85. data/ext/lz4_amalgam.c +0 -23
  86. data/gemstub.rb +5 -16
  87. data/lib/extlz4/oldstream.rb +1 -1
  88. data/lib/extlz4.rb +51 -3
  89. data/test/common.rb +2 -2
  90. metadata +84 -16
  91. data/contrib/lz4/circle.yml +0 -38
  92. data/contrib/lz4/lib/lz4opt.h +0 -356
  93. data/lib/extlz4/version.rb +0 -3
@@ -50,20 +50,26 @@
  * Prefer these methods in priority order (0 > 1 > 2)
  */
  #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
- # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
  # define XXH_FORCE_MEMORY_ACCESS 2
- # elif defined(__INTEL_COMPILER) || \
- (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+ # elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7S__) ))
  # define XXH_FORCE_MEMORY_ACCESS 1
  # endif
  #endif

  /*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
- * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
- * By default, this option is disabled. To enable it, uncomment below define :
+ * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks input for null pointer.
+ * If it is, result for null input pointers is the same as a null-length input.
  */
- /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+ #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
+ # define XXH_ACCEPT_NULL_INPUT_POINTER 0
+ #endif

  /*!XXH_FORCE_NATIVE_FORMAT :
  * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
@@ -80,8 +86,9 @@
  /*!XXH_FORCE_ALIGN_CHECK :
  * This is a minor performance trick, only useful with lots of very small keys.
  * It means : check for aligned/unaligned input.
- * The check costs one initial branch per hash; set to 0 when the input data
- * is guaranteed to be aligned.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
  */
  #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
  # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
@@ -104,6 +111,8 @@ static void XXH_free (void* p) { free(p); }
  #include <string.h>
  static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

+ #include <assert.h> /* assert */
+
  #define XXH_STATIC_LINKING_ONLY
  #include "xxhash.h"

@@ -113,40 +122,35 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
  ***************************************/
  #ifdef _MSC_VER /* Visual Studio */
  # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
- #endif
-
- #ifndef XXH_FORCE_INLINE
- # ifdef _MSC_VER /* Visual Studio */
- # define XXH_FORCE_INLINE static __forceinline
- # else
- # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
- # ifdef __GNUC__
- # define XXH_FORCE_INLINE static inline __attribute__((always_inline))
- # else
- # define XXH_FORCE_INLINE static inline
- # endif
+ # define FORCE_INLINE static __forceinline
+ #else
+ # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+ # ifdef __GNUC__
+ # define FORCE_INLINE static inline __attribute__((always_inline))
  # else
- # define XXH_FORCE_INLINE static
- # endif /* __STDC_VERSION__ */
- # endif /* _MSC_VER */
- #endif /* XXH_FORCE_INLINE */
+ # define FORCE_INLINE static inline
+ # endif
+ # else
+ # define FORCE_INLINE static
+ # endif /* __STDC_VERSION__ */
+ #endif


  /* *************************************
  * Basic Types
  ***************************************/
  #ifndef MEM_MODULE
- # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ # if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  # include <stdint.h>
  typedef uint8_t BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
- typedef int32_t S32;
  # else
  typedef unsigned char BYTE;
  typedef unsigned short U16;
  typedef unsigned int U32;
- typedef signed int S32;
  # endif
  #endif

@@ -213,8 +217,12 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

  /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
  #ifndef XXH_CPU_LITTLE_ENDIAN
- static const int g_one = 1;
- # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
+ static int XXH_isLittleEndian(void)
+ {
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+ }
+ # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
  #endif
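The replacement above is the classic union trick for byte-order detection: store a known 32-bit value and inspect its lowest-addressed byte. A minimal standalone sketch of the same idea (illustrative names, not part of the package; compilers typically constant-fold the call, which is why the comment warns that a static object would hurt performance):

    #include <stdio.h>

    /* Returns 1 on a little-endian machine, 0 on a big-endian one:
     * the first byte of the value 1 is 1 only when the
     * least-significant byte is stored first. */
    static int is_little_endian(void)
    {
        const union { unsigned int u; unsigned char c[4]; } one = { 1 };
        return (int)one.c[0];
    }

    int main(void)
    {
        printf("little-endian: %d\n", is_little_endian());
        return 0;
    }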


@@ -223,7 +231,7 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
  *****************************/
  typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

- XXH_FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+ FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
  {
  if (align==XXH_unaligned)
  return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
@@ -231,7 +239,7 @@ XXH_FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, X
  return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
  }

- XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+ FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
  {
  return XXH_readLE32_align(ptr, endian, XXH_unaligned);
  }
@@ -245,12 +253,12 @@ static U32 XXH_readBE32(const void* ptr)
  /* *************************************
  * Macros
  ***************************************/
- #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+ #define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */
  XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


  /* *******************************************************************
- * 32-bits hash functions
+ * 32-bit hash functions
  *********************************************************************/
  static const U32 PRIME32_1 = 2654435761U;
  static const U32 PRIME32_2 = 2246822519U;
@@ -266,14 +274,89 @@ static U32 XXH32_round(U32 seed, U32 input)
  return seed;
  }

- XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+ /* mix all bits */
+ static U32 XXH32_avalanche(U32 h32)
+ {
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return(h32);
+ }
+
+ #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+ static U32
+ XXH32_finalize(U32 h32, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+
+ {
+ const BYTE* p = (const BYTE*)ptr;
+
+ #define PROCESS1 \
+ h32 += (*p++) * PRIME32_5; \
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+
+ #define PROCESS4 \
+ h32 += XXH_get32bits(p) * PRIME32_3; \
+ p+=4; \
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+
+ switch(len&15) /* or switch(bEnd - p) */
+ {
+ case 12: PROCESS4;
+ /* fallthrough */
+ case 8: PROCESS4;
+ /* fallthrough */
+ case 4: PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13: PROCESS4;
+ /* fallthrough */
+ case 9: PROCESS4;
+ /* fallthrough */
+ case 5: PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14: PROCESS4;
+ /* fallthrough */
+ case 10: PROCESS4;
+ /* fallthrough */
+ case 6: PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15: PROCESS4;
+ /* fallthrough */
+ case 11: PROCESS4;
+ /* fallthrough */
+ case 7: PROCESS4;
+ /* fallthrough */
+ case 3: PROCESS1;
+ /* fallthrough */
+ case 2: PROCESS1;
+ /* fallthrough */
+ case 1: PROCESS1;
+ /* fallthrough */
+ case 0: return XXH32_avalanche(h32);
+ }
+ assert(0);
+ return h32; /* reaching this point is deemed impossible */
+ }
+
+
+ FORCE_INLINE U32
+ XXH32_endian_align(const void* input, size_t len, U32 seed,
+ XXH_endianess endian, XXH_alignment align)
  {
  const BYTE* p = (const BYTE*)input;
  const BYTE* const bEnd = p + len;
  U32 h32;
- #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

- #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
  if (p==NULL) {
  len=0;
  bEnd=p=(const BYTE*)(size_t)16;
@@ -281,7 +364,7 @@ XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed,
  #endif

  if (len>=16) {
- const BYTE* const limit = bEnd - 16;
+ const BYTE* const limit = bEnd - 15;
  U32 v1 = seed + PRIME32_1 + PRIME32_2;
  U32 v2 = seed + PRIME32_2;
  U32 v3 = seed + 0;
@@ -292,34 +375,17 @@ XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed,
  v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
  v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
  v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
- } while (p<=limit);
+ } while (p < limit);

- h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
  } else {
  h32 = seed + PRIME32_5;
  }

- h32 += (U32) len;
-
- while (p+4<=bEnd) {
- h32 += XXH_get32bits(p) * PRIME32_3;
- h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
- p+=4;
- }
-
- while (p<bEnd) {
- h32 += (*p) * PRIME32_5;
- h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
- p++;
- }
-
- h32 ^= h32 >> 15;
- h32 *= PRIME32_2;
- h32 ^= h32 >> 13;
- h32 *= PRIME32_3;
- h32 ^= h32 >> 16;
+ h32 += (U32)len;

- return h32;
+ return XXH32_finalize(h32, p, len&15, endian, align);
  }
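For orientation, the function above sits behind the one-shot entry point of the public API; hashing a buffer looks like this (a minimal sketch, arbitrary sample data):

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char data[] = "sample buffer";
        /* XXH32() dispatches to XXH32_endian_align(); the trailing
         * 0-15 bytes are handled by the new XXH32_finalize(). */
        unsigned int h = XXH32(data, strlen(data), 0 /* seed */);
        printf("XXH32 = %08x\n", h);
        return 0;
    }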


@@ -371,74 +437,81 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t
  XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
  {
  XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
+ memset(&state, 0, sizeof(state));
  state.v1 = seed + PRIME32_1 + PRIME32_2;
  state.v2 = seed + PRIME32_2;
  state.v3 = seed + 0;
  state.v4 = seed - PRIME32_1;
- memcpy(statePtr, &state, sizeof(state));
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
  return XXH_OK;
  }


- XXH_FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+ FORCE_INLINE XXH_errorcode
+ XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
  {
- const BYTE* p = (const BYTE*)input;
- const BYTE* const bEnd = p + len;
-
- #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
- if (input==NULL) return XXH_ERROR;
+ if (input==NULL)
+ #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+ #else
+ return XXH_ERROR;
  #endif

- state->total_len_32 += (unsigned)len;
- state->large_len |= (len>=16) | (state->total_len_32>=16);
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;

- if (state->memsize + len < 16) { /* fill in tmp buffer */
- XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
- state->memsize += (unsigned)len;
- return XXH_OK;
- }
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);

- if (state->memsize) { /* some data left from previous update */
- XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
- { const U32* p32 = state->mem32;
- state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
- state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
- state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
- state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
  }
- p += 16-state->memsize;
- state->memsize = 0;
- }
-
- if (p <= bEnd-16) {
- const BYTE* const limit = bEnd - 16;
- U32 v1 = state->v1;
- U32 v2 = state->v2;
- U32 v3 = state->v3;
- U32 v4 = state->v4;

- do {
- v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
- v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
- v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
- v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
- } while (p<=limit);
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }

- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
- }
+ if (p <= bEnd-16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }

- if (p < bEnd) {
- XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
- state->memsize = (unsigned)(bEnd-p);
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
  }

  return XXH_OK;
  }

+
  XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
  {
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
@@ -450,40 +523,23 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void*
  }


-
- XXH_FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+ FORCE_INLINE U32
+ XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
  {
- const BYTE * p = (const BYTE*)state->mem32;
- const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
  U32 h32;

  if (state->large_len) {
- h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+ h32 = XXH_rotl32(state->v1, 1)
+ + XXH_rotl32(state->v2, 7)
+ + XXH_rotl32(state->v3, 12)
+ + XXH_rotl32(state->v4, 18);
  } else {
  h32 = state->v3 /* == seed */ + PRIME32_5;
  }

  h32 += state->total_len_32;

- while (p+4<=bEnd) {
- h32 += XXH_readLE32(p, endian) * PRIME32_3;
- h32 = XXH_rotl32(h32, 17) * PRIME32_4;
- p+=4;
- }
-
- while (p<bEnd) {
- h32 += (*p) * PRIME32_5;
- h32 = XXH_rotl32(h32, 11) * PRIME32_1;
- p++;
- }
-
- h32 ^= h32 >> 15;
- h32 *= PRIME32_2;
- h32 ^= h32 >> 13;
- h32 *= PRIME32_3;
- h32 ^= h32 >> 16;
-
- return h32;
+ return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
  }
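The streaming digest above now hands its buffered tail (`state->mem32`, at most 15 bytes) to the shared `XXH32_finalize`. Typical streaming use of the public API, as a sketch with return codes unchecked:

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        XXH32_state_t* st = XXH32_createState();
        XXH32_reset(st, 0 /* seed */);
        /* Feed input in arbitrary pieces; partial 16-byte stripes are
         * buffered in the state between calls. */
        XXH32_update(st, "hello, ", 7);
        XXH32_update(st, "xxhash", 6);
        printf("streamed XXH32 = %08x\n", (unsigned int)XXH32_digest(st));
        XXH32_freeState(st);
        return 0;
    }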


@@ -503,7 +559,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
  /*! Default XXH result types are basic unsigned 32 and 64 bits.
  * The canonical representation follows human-readable write convention, aka big-endian (large digits first).
  * These functions allow transformation of hash result into and from its canonical format.
- * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+ * This way, hash values can be written into a file or buffer, remaining comparable across different systems.
  */

  XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
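The canonical form referred to above is simply the hash serialized in big-endian byte order, so a value written on one machine compares equal when read on another. A round-trip sketch against the public API:

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        XXH32_hash_t h = XXH32("abc", 3, 0);
        XXH32_canonical_t canon;
        /* canon.digest[] now holds the hash big-endian, ready to be
         * written to a file or sent over the wire. */
        XXH32_canonicalFromHash(&canon, h);
        printf("round-trip ok: %d\n", XXH32_hashFromCanonical(&canon) == h);
        return 0;
    }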
@@ -522,18 +578,21 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
  #ifndef XXH_NO_LONG_LONG

  /* *******************************************************************
- * 64-bits hash functions
+ * 64-bit hash functions
  *********************************************************************/

  /*====== Memory access ======*/

  #ifndef MEM_MODULE
  # define MEM_MODULE
- # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ # if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  # include <stdint.h>
  typedef uint64_t U64;
  # else
- typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+ /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
+ typedef unsigned long long U64;
  # endif
  #endif

@@ -583,7 +642,7 @@ static U64 XXH_swap64 (U64 x)
  }
  #endif

- XXH_FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
  {
  if (align==XXH_unaligned)
  return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
@@ -591,7 +650,7 @@ XXH_FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, X
  return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
  }

- XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+ FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
  {
  return XXH_readLE64_align(ptr, endian, XXH_unaligned);
  }
@@ -626,14 +685,137 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
  return acc;
  }

- XXH_FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+ static U64 XXH64_avalanche(U64 h64)
+ {
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+ }
+
+
+ #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+ static U64
+ XXH64_finalize(U64 h64, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+ {
+ const BYTE* p = (const BYTE*)ptr;
+
+ #define PROCESS1_64 \
+ h64 ^= (*p++) * PRIME64_5; \
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+
+ #define PROCESS4_64 \
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+ p+=4; \
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+
+ #define PROCESS8_64 { \
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
+ p+=8; \
+ h64 ^= k1; \
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
+ }
+
+ switch(len&31) {
+ case 24: PROCESS8_64;
+ /* fallthrough */
+ case 16: PROCESS8_64;
+ /* fallthrough */
+ case 8: PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28: PROCESS8_64;
+ /* fallthrough */
+ case 20: PROCESS8_64;
+ /* fallthrough */
+ case 12: PROCESS8_64;
+ /* fallthrough */
+ case 4: PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25: PROCESS8_64;
+ /* fallthrough */
+ case 17: PROCESS8_64;
+ /* fallthrough */
+ case 9: PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29: PROCESS8_64;
+ /* fallthrough */
+ case 21: PROCESS8_64;
+ /* fallthrough */
+ case 13: PROCESS8_64;
+ /* fallthrough */
+ case 5: PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26: PROCESS8_64;
+ /* fallthrough */
+ case 18: PROCESS8_64;
+ /* fallthrough */
+ case 10: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30: PROCESS8_64;
+ /* fallthrough */
+ case 22: PROCESS8_64;
+ /* fallthrough */
+ case 14: PROCESS8_64;
+ /* fallthrough */
+ case 6: PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27: PROCESS8_64;
+ /* fallthrough */
+ case 19: PROCESS8_64;
+ /* fallthrough */
+ case 11: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31: PROCESS8_64;
+ /* fallthrough */
+ case 23: PROCESS8_64;
+ /* fallthrough */
+ case 15: PROCESS8_64;
+ /* fallthrough */
+ case 7: PROCESS4_64;
+ /* fallthrough */
+ case 3: PROCESS1_64;
+ /* fallthrough */
+ case 2: PROCESS1_64;
+ /* fallthrough */
+ case 1: PROCESS1_64;
+ /* fallthrough */
+ case 0: return XXH64_avalanche(h64);
+ }
+
+ /* impossible to reach */
+ assert(0);
+ return 0; /* unreachable, but some compilers complain without it */
+ }
+
+ FORCE_INLINE U64
+ XXH64_endian_align(const void* input, size_t len, U64 seed,
+ XXH_endianess endian, XXH_alignment align)
  {
  const BYTE* p = (const BYTE*)input;
- const BYTE* const bEnd = p + len;
+ const BYTE* bEnd = p + len;
  U64 h64;
- #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

- #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
  if (p==NULL) {
  len=0;
  bEnd=p=(const BYTE*)(size_t)32;
@@ -666,32 +848,7 @@ XXH_FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed,

  h64 += (U64) len;

- while (p+8<=bEnd) {
- U64 const k1 = XXH64_round(0, XXH_get64bits(p));
- h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
- p+=8;
- }
-
- if (p+4<=bEnd) {
- h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
- h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
- p+=4;
- }
-
- while (p<bEnd) {
- h64 ^= (*p) * PRIME64_5;
- h64 = XXH_rotl64(h64, 11) * PRIME64_1;
- p++;
- }
-
- h64 ^= h64 >> 33;
- h64 *= PRIME64_2;
- h64 ^= h64 >> 29;
- h64 *= PRIME64_3;
- h64 ^= h64 >> 32;
-
- return h64;
+ return XXH64_finalize(h64, p, len, endian, align);
  }
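The 64-bit path mirrors the 32-bit one: the main loop consumes 32-byte stripes, and `XXH64_finalize` dispatches on `len & 31` through an unrolled ladder of 8-, 4-, and 1-byte steps. One-shot use of the public API (sketch, arbitrary inputs):

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char data[] = "sample buffer";
        unsigned long long h = XXH64(data, strlen(data), 0ULL /* seed */);
        printf("XXH64 = %016llx\n", h);
        return 0;
    }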


@@ -741,65 +898,71 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t
  XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
  {
  XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
+ memset(&state, 0, sizeof(state));
  state.v1 = seed + PRIME64_1 + PRIME64_2;
  state.v2 = seed + PRIME64_2;
  state.v3 = seed + 0;
  state.v4 = seed - PRIME64_1;
- memcpy(statePtr, &state, sizeof(state));
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
  return XXH_OK;
  }

- XXH_FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+ FORCE_INLINE XXH_errorcode
+ XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
  {
- const BYTE* p = (const BYTE*)input;
- const BYTE* const bEnd = p + len;
-
- #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
- if (input==NULL) return XXH_ERROR;
+ if (input==NULL)
+ #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+ #else
+ return XXH_ERROR;
  #endif

- state->total_len += len;
-
- if (state->memsize + len < 32) { /* fill in tmp buffer */
- XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
- state->memsize += (U32)len;
- return XXH_OK;
- }
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;

- if (state->memsize) { /* tmp buffer is full */
- XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
- state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
- state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
- state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
- state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
- p += 32-state->memsize;
- state->memsize = 0;
- }
+ state->total_len += len;

- if (p+32 <= bEnd) {
- const BYTE* const limit = bEnd - 32;
- U64 v1 = state->v1;
- U64 v2 = state->v2;
- U64 v3 = state->v3;
- U64 v4 = state->v4;
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }

- do {
- v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
- v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
- v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
- v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
- } while (p<=limit);
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }

- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
- }
+ if (p+32 <= bEnd) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }

- if (p < bEnd) {
- XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
- state->memsize = (unsigned)(bEnd-p);
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
  }

  return XXH_OK;
@@ -815,10 +978,8 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void*
  return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
  }

- XXH_FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
  {
- const BYTE * p = (const BYTE*)state->mem64;
- const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
  U64 h64;

  if (state->total_len >= 32) {
@@ -833,37 +994,12 @@ XXH_FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endian
  h64 = XXH64_mergeRound(h64, v3);
  h64 = XXH64_mergeRound(h64, v4);
  } else {
- h64 = state->v3 + PRIME64_5;
+ h64 = state->v3 /*seed*/ + PRIME64_5;
  }

  h64 += (U64) state->total_len;

- while (p+8<=bEnd) {
- U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
- h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
- p+=8;
- }
-
- if (p+4<=bEnd) {
- h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
- h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
- p+=4;
- }
-
- while (p<bEnd) {
- h64 ^= (*p) * PRIME64_5;
- h64 = XXH_rotl64(h64, 11) * PRIME64_1;
- p++;
- }
-
- h64 ^= h64 >> 33;
- h64 *= PRIME64_2;
- h64 ^= h64 >> 29;
- h64 *= PRIME64_3;
- h64 ^= h64 >> 32;
-
- return h64;
+ return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
  }

  XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)