xxhash 0.3.0 → 0.4.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: eb65711481aa5bbc9e1b7dc6c8338e0bda70f377
- data.tar.gz: 5c4c2b5a262bd5831f9658aeccf35457a0556ce8
+ metadata.gz: 7631f1cb865522390841ca6345dadf0247b78404
+ data.tar.gz: 07f7a162f2be6f2df03aad3993e8fa74ded83fcc
  SHA512:
- metadata.gz: a1160fe558a79aaa74a9fabe74e902890bcb7a7512ad08ff4634839fa40dfb2abe74eef367b689c5d42f178380a77deba6d7d6a8cd15ebf94d52a6600c4a7e85
- data.tar.gz: 5615f46479f9c86b2af11dc36cea59abd1205ba2b06f0364c572bb5b54aa5a816d5f643619b5c0f43965fc9928763ea3c8b112a8134a391beee09a7d527f4d0a
+ metadata.gz: 278c331aaa693e1489ceaf6c302a8f9f21087457867807a9f043ad3fe673279e09a05906e10e57266ee3980ce51c3d60c13d70b133b7312b75c8cb8962c5cbd2
+ data.tar.gz: e220c7ee0bc95631668cbc981deb5105fdf7f09c1c2acfe4f7a3bf4f532d03e69bdf7950403ade750cd8ceeede2f75df63967df98c478a595e1a6c33bf310ea1
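The SHA1/SHA512 pairs above pin the two archives inside the `.gem` package (`metadata.gz` and `data.tar.gz`). A quick way to reproduce them with Ruby's standard `digest` library — the file paths are illustrative and assume you have already unpacked `xxhash-0.4.0.gem`:

```ruby
require 'digest'

# Recompute the digests recorded in checksums.yaml for the new release.
puts Digest::SHA1.hexdigest(File.binread('metadata.gz'))    # expect 7631f1cb86...
puts Digest::SHA512.hexdigest(File.binread('data.tar.gz'))  # expect e220c7ee0b...
```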
data/.travis.yml CHANGED
@@ -1,4 +1,9 @@
  language: ruby
+ before_install:
+ - gem install bundler
  rvm:
  - 1.9.3
- - rbx-19mode
+ - 2.0.0
+ - 2.1
+ - 2.2
+ script: "bundle exec rake test"
data/CHANGELOG.md CHANGED
@@ -1,6 +1,13 @@
- ### master
+ ### 0.4.0 (December 11, 2016)
+ * Add xxHash to `Digest` module and make it more compatible
+ with other hash functions from `Digest`. (by [@justinwsmith](https://github.com/justinwsmith))
+ * Add a `StreamingHash` class that's externally instantiable. (by [@justinwsmith](https://github.com/justinwsmith))
+ * Fix segfault when nil is passed as param (https://github.com/nashby/xxhash/issues/13) (by [@justinwsmith](https://github.com/justinwsmith))
+ * Update libxxhash to 0.6.2.
+
+ ### 0.3.0 (January 18, 2015)
  * make seed param optional (by [@weakish](https://github.com/weakish))
- * add 64-bit xxhash function (by [@justinwsmith](https://github.com/justinwsmith))
+ * add 64-bit xxhash function (by [@justinwsmith](https://github.com/justinwsmith))
 
  ### 0.2.0 (September 4, 2013)
  * xxHash updated to [r32](https://code.google.com/p/xxhash/source/detail?r=32)
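In user-facing terms, the 0.4.0 entries above translate to the following behavior — a minimal sketch using the gem's documented `XXhash` module (the exact `Digest`/`StreamingHash` class names introduced by these changes are not visible in this diff, so they are omitted here):

```ruby
require 'xxhash'

XXhash.xxh32('test')         # seed is optional since 0.3.0; defaults to 0
XXhash.xxh64('test', 12345)  # 64-bit variant, added in 0.3.0

# Since 0.4.0, passing nil no longer segfaults the process (issue 13);
# whether it raises or returns a value, it now fails safely in Ruby-land.
begin
  XXhash.xxh32(nil)
rescue TypeError, ArgumentError
  # acceptable: a Ruby exception instead of a C-level crash
end
```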
data/README.md CHANGED
@@ -1,6 +1,6 @@
  ## xxHash [![Build Status](https://travis-ci.org/nashby/xxhash.png?branch=master)](https://travis-ci.org/nashby/xxhash)
 
- Ruby wrapper for [xxHash](http://code.google.com/p/xxhash/)
+ Ruby wrapper for [xxHash](https://github.com/Cyan4973/xxHash)
 
  ### Install
 
@@ -17,6 +17,8 @@ seed = 12345
  XXhash.xxh32(text, seed) # => 3834992036
  ```
 
+ If you do not provide a seed, it will use the default value `0`.
+
  You can use it with `IO` objects too:
 
  ```ruby
@@ -25,16 +27,18 @@ XXhash.xxh32_stream(StringIO.new('test'), 123) # => 2758658570
 
  Note that you can also pass a chunk size as third param (it's 32 bytes by default)
 
+ XXH64 is also supported: you can use `xxh64` and `xxh64_stream`.
+
  ### Supported Ruby versions
 
- - MRI 1.9.3
+ - MRI 1.9.3, 2.0, 2.1, 2.2.
  - rbx-19mode
 
  Note: It doesn't work on JRuby as it uses C extension.
 
  ### Versioning
 
- Version 0.2.0 is equal to [r32](https://code.google.com/p/xxhash/source/detail?r32)
+ Version 0.4.0 is equal to [0.6.2](https://github.com/Cyan4973/xxHash/tree/v0.6.2)
 
  ## Contributing
 
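Putting the README additions together, the streaming API mirrors the one-shot calls; the expected value below comes from the README itself, while `some.file` is a hypothetical path:

```ruby
require 'xxhash'
require 'stringio'

XXhash.xxh32_stream(StringIO.new('test'), 123)  # => 2758658570 (per the README)

# Chunk size is an optional third param (32 bytes by default).
File.open('some.file', 'rb') do |io|
  XXhash.xxh32_stream(io, 123, 1024)
end

XXhash.xxh64_stream(StringIO.new('test'), 123)  # 64-bit streaming variant
```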
@@ -1,154 +1,183 @@
  /*
- xxHash - Fast Hash algorithm
- Copyright (C) 2012-2014, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - xxHash source repository : http://code.google.com/p/xxhash/
- - public discussion board : https://groups.google.com/forum/#!forum/lz4c
+ * xxHash - Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - xxHash homepage: http://www.xxhash.com
+ * - xxHash source repository : https://github.com/Cyan4973/xxHash
  */
 
 
- //**************************************
- // Tuning parameters
- //**************************************
- // Unaligned memory access is automatically enabled for "common" CPU, such as x86.
- // For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
- // If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
- // You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
- #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
- # define XXH_USE_UNALIGNED_ACCESS 1
+ /* *************************************
+ * Tuning parameters
+ ***************************************/
+ /*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The below switch allow to select different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
+ * It can generate buggy code on targets which do not support unaligned memory accesses.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+ # define XXH_FORCE_MEMORY_ACCESS 2
+ # elif defined(__INTEL_COMPILER) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+ # define XXH_FORCE_MEMORY_ACCESS 1
+ # endif
  #endif
 
- // XXH_ACCEPT_NULL_INPUT_POINTER :
- // If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
- // When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
- // This option has a very small performance cost (only measurable on small inputs).
- // By default, this option is disabled. To enable it, uncomment below define :
- // #define XXH_ACCEPT_NULL_INPUT_POINTER 1
-
- // XXH_FORCE_NATIVE_FORMAT :
- // By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
- // Results are therefore identical for little-endian and big-endian CPU.
- // This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- // Should endian-independance be of no importance for your application, you may set the #define below to 1.
- // It will improve speed for Big-endian CPU.
- // This option has no impact on Little_Endian CPU.
- #define XXH_FORCE_NATIVE_FORMAT 0
-
- //**************************************
- // Compiler Specific Options
- //**************************************
- // Disable some Visual warning messages
- #ifdef _MSC_VER // Visual Studio
- # pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
+ /*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
+ * By default, this option is disabled. To enable it, uncomment below define :
+ */
+ /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+ /*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPU.
+ * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
+ * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * to improve speed for Big-endian CPU.
+ * This option has no impact on Little_Endian CPU.
+ */
+ #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+ # define XXH_FORCE_NATIVE_FORMAT 0
  #endif
 
- #ifdef _MSC_VER // Visual Studio
- # define FORCE_INLINE static __forceinline
- #else
- # ifdef __GNUC__
- # define FORCE_INLINE static inline __attribute__((always_inline))
+ /*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+ # define XXH_FORCE_ALIGN_CHECK 0
  # else
- # define FORCE_INLINE static inline
+ # define XXH_FORCE_ALIGN_CHECK 1
  # endif
  #endif
 
- //**************************************
- // Includes & Memory related functions
- //**************************************
- #include "libxxhash.h"
- // Modify the local functions below should you wish to use some other memory routines
- // for malloc(), free()
+
+ /* *************************************
+ * Includes & Memory related functions
+ ***************************************/
+ /* Modify the local functions below should you wish to use some other memory routines */
+ /* for malloc(), free() */
  #include <stdlib.h>
  static void* XXH_malloc(size_t s) { return malloc(s); }
  static void XXH_free (void* p) { free(p); }
- // for memcpy()
+ /* for memcpy() */
  #include <string.h>
- static void* XXH_memcpy(void* dest, const void* src, size_t size)
- {
- return memcpy(dest,src,size);
- }
+ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
 
+ #define XXH_STATIC_LINKING_ONLY
+ #include "xxhash.h"
 
- //**************************************
- // Basic Types
- //**************************************
- #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
- # include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
- #else
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64;
- #endif
 
- #if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
- # define _PACKED __attribute__ ((packed))
+ /* *************************************
+ * Compiler Specific Options
+ ***************************************/
+ #ifdef _MSC_VER /* Visual Studio */
+ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+ # define FORCE_INLINE static __forceinline
  #else
- # define _PACKED
- #endif
-
- #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
- # ifdef __IBMC__
- # pragma pack(1)
+ # if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+ # ifdef __GNUC__
+ # define FORCE_INLINE static inline __attribute__((always_inline))
+ # else
+ # define FORCE_INLINE static inline
+ # endif
  # else
- # pragma pack(push, 1)
- # endif
+ # define FORCE_INLINE static
+ # endif /* __STDC_VERSION__ */
  #endif
 
- typedef struct _U32_S
- {
- U32 v;
- } _PACKED U32_S;
- typedef struct _U64_S
- {
- U64 v;
- } _PACKED U64_S;
 
- #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
- # pragma pack(pop)
+ /* *************************************
+ * Basic Types
+ ***************************************/
+ #ifndef MEM_MODULE
+ # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ # include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ # else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ # endif
  #endif
 
- #define A32(x) (((U32_S *)(x))->v)
- #define A64(x) (((U64_S *)(x))->v)
+ #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+ /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+ static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
 
+ #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
 
- //***************************************
- // Compiler-specific Functions and Macros
- //***************************************
+ /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+ /* currently only defined for gcc and icc */
+ typedef union { U32 u32; } __attribute__((packed)) unalign;
+ static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+ #else
+
+ /* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+ static U32 XXH_read32(const void* memPtr)
+ {
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+ }
+
+ #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+ /* ****************************************
+ * Compiler-specific Functions and Macros
+ ******************************************/
  #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
 
- // Note : although _rotl exists for minGW (GCC under windows), performance seems poor
+ /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
  #if defined(_MSC_VER)
  # define XXH_rotl32(x,r) _rotl(x,r)
  # define XXH_rotl64(x,r) _rotl64(x,r)
@@ -157,76 +186,44 @@ typedef struct _U64_S
  # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
  #endif
 
- #if defined(_MSC_VER) // Visual Studio
+ #if defined(_MSC_VER) /* Visual Studio */
  # define XXH_swap32 _byteswap_ulong
- # define XXH_swap64 _byteswap_uint64
  #elif GCC_VERSION >= 403
  # define XXH_swap32 __builtin_bswap32
- # define XXH_swap64 __builtin_bswap64
  #else
- static inline U32 XXH_swap32 (U32 x)
+ static U32 XXH_swap32 (U32 x)
  {
  return ((x << 24) & 0xff000000 ) |
  ((x << 8) & 0x00ff0000 ) |
  ((x >> 8) & 0x0000ff00 ) |
  ((x >> 24) & 0x000000ff );
  }
- static inline U64 XXH_swap64 (U64 x)
- {
- return ((x << 56) & 0xff00000000000000ULL) |
- ((x << 40) & 0x00ff000000000000ULL) |
- ((x << 24) & 0x0000ff0000000000ULL) |
- ((x << 8) & 0x000000ff00000000ULL) |
- ((x >> 8) & 0x00000000ff000000ULL) |
- ((x >> 24) & 0x0000000000ff0000ULL) |
- ((x >> 40) & 0x000000000000ff00ULL) |
- ((x >> 56) & 0x00000000000000ffULL);
- }
  #endif
 
 
- //**************************************
- // Constants
- //**************************************
- #define PRIME32_1 2654435761U
- #define PRIME32_2 2246822519U
- #define PRIME32_3 3266489917U
- #define PRIME32_4 668265263U
- #define PRIME32_5 374761393U
-
- #define PRIME64_1 11400714785074694791ULL
- #define PRIME64_2 14029467366897019727ULL
- #define PRIME64_3 1609587929392839161ULL
- #define PRIME64_4 9650029242287828579ULL
- #define PRIME64_5 2870177450012600261ULL
-
- //**************************************
- // Architecture Macros
- //**************************************
+ /* *************************************
+ * Architecture Macros
+ ***************************************/
  typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
- #ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
- static const int one = 1;
- # define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
- #endif
-
 
- //**************************************
- // Macros
- //**************************************
- #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
+ /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+ #ifndef XXH_CPU_LITTLE_ENDIAN
+ static const int g_one = 1;
+ # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
+ #endif
 
 
- //****************************
- // Memory reads
- //****************************
+ /* ***************************
+ * Memory reads
+ *****************************/
  typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
 
  FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
  {
  if (align==XXH_unaligned)
- return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
+ return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
  else
- return endian==XXH_littleEndian ? *(U32*)ptr : XXH_swap32(*(U32*)ptr);
+ return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
  }
 
  FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
@@ -234,23 +231,36 @@ FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
  return XXH_readLE32_align(ptr, endian, XXH_unaligned);
  }
 
- FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+ static U32 XXH_readBE32(const void* ptr)
  {
- if (align==XXH_unaligned)
- return endian==XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
- else
- return endian==XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
  }
 
- FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+
+ /* *************************************
+ * Macros
+ ***************************************/
+ #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+ /* *******************************************************************
+ * 32-bits hash functions
+ *********************************************************************/
+ static const U32 PRIME32_1 = 2654435761U;
+ static const U32 PRIME32_2 = 2246822519U;
+ static const U32 PRIME32_3 = 3266489917U;
+ static const U32 PRIME32_4 = 668265263U;
+ static const U32 PRIME32_5 = 374761393U;
+
+ static U32 XXH32_round(U32 seed, U32 input)
  {
- return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
  }
 
-
- //****************************
- // Simple Hash Functions
- //****************************
  FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
  {
  const BYTE* p = (const BYTE*)input;
@@ -259,60 +269,40 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH
  #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
 
  #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
- if (p==NULL)
- {
+ if (p==NULL) {
  len=0;
  bEnd=p=(const BYTE*)(size_t)16;
  }
  #endif
 
- if (len>=16)
- {
+ if (len>=16) {
  const BYTE* const limit = bEnd - 16;
  U32 v1 = seed + PRIME32_1 + PRIME32_2;
  U32 v2 = seed + PRIME32_2;
  U32 v3 = seed + 0;
  U32 v4 = seed - PRIME32_1;
 
- do
- {
- v1 += XXH_get32bits(p) * PRIME32_2;
- v1 = XXH_rotl32(v1, 13);
- v1 *= PRIME32_1;
- p+=4;
- v2 += XXH_get32bits(p) * PRIME32_2;
- v2 = XXH_rotl32(v2, 13);
- v2 *= PRIME32_1;
- p+=4;
- v3 += XXH_get32bits(p) * PRIME32_2;
- v3 = XXH_rotl32(v3, 13);
- v3 *= PRIME32_1;
- p+=4;
- v4 += XXH_get32bits(p) * PRIME32_2;
- v4 = XXH_rotl32(v4, 13);
- v4 *= PRIME32_1;
- p+=4;
- }
- while (p<=limit);
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+ } while (p<=limit);
 
  h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
- }
- else
- {
+ } else {
  h32 = seed + PRIME32_5;
  }
 
  h32 += (U32) len;
 
- while (p+4<=bEnd)
- {
+ while (p+4<=bEnd) {
  h32 += XXH_get32bits(p) * PRIME32_3;
  h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
  p+=4;
  }
 
- while (p<bEnd)
- {
+ while (p<bEnd) {
  h32 += (*p) * PRIME32_5;
  h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
  p++;
@@ -328,26 +318,24 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH
  }
 
 
- unsigned int XXH32 (const void* input, size_t len, unsigned seed)
+ XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
  {
  #if 0
- // Simple version, good for code maintenance, but unfortunately slow for small inputs
- XXH32_state_t state;
- XXH32_reset(&state, seed);
- XXH32_update(&state, input, len);
- return XXH32_digest(&state);
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_CREATESTATE_STATIC(state);
+ XXH32_reset(state, seed);
+ XXH32_update(state, input, len);
+ return XXH32_digest(state);
  #else
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
 
- # if !defined(XXH_USE_UNALIGNED_ACCESS)
- if ((((size_t)input) & 3) == 0) // Input is aligned, let's leverage the speed advantage
- {
- if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
- else
- return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
- }
- # endif
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
 
  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
@@ -356,231 +344,40 @@ unsigned int XXH32 (const void* input, size_t len, unsigned seed)
  #endif
  }
 
- FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
- {
- const BYTE* p = (const BYTE*)input;
- const BYTE* bEnd = p + len;
- U64 h64;
- #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
 
- #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
- if (p==NULL)
- {
- len=0;
- bEnd=p=(const BYTE*)(size_t)32;
- }
- #endif
 
- if (len>=32)
- {
- const BYTE* const limit = bEnd - 32;
- U64 v1 = seed + PRIME64_1 + PRIME64_2;
- U64 v2 = seed + PRIME64_2;
- U64 v3 = seed + 0;
- U64 v4 = seed - PRIME64_1;
-
- do
- {
- v1 += XXH_get64bits(p) * PRIME64_2;
- p+=8;
- v1 = XXH_rotl64(v1, 31);
- v1 *= PRIME64_1;
- v2 += XXH_get64bits(p) * PRIME64_2;
- p+=8;
- v2 = XXH_rotl64(v2, 31);
- v2 *= PRIME64_1;
- v3 += XXH_get64bits(p) * PRIME64_2;
- p+=8;
- v3 = XXH_rotl64(v3, 31);
- v3 *= PRIME64_1;
- v4 += XXH_get64bits(p) * PRIME64_2;
- p+=8;
- v4 = XXH_rotl64(v4, 31);
- v4 *= PRIME64_1;
- }
- while (p<=limit);
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-
- v1 *= PRIME64_2;
- v1 = XXH_rotl64(v1, 31);
- v1 *= PRIME64_1;
- h64 ^= v1;
- h64 = h64 * PRIME64_1 + PRIME64_4;
-
- v2 *= PRIME64_2;
- v2 = XXH_rotl64(v2, 31);
- v2 *= PRIME64_1;
- h64 ^= v2;
- h64 = h64 * PRIME64_1 + PRIME64_4;
-
- v3 *= PRIME64_2;
- v3 = XXH_rotl64(v3, 31);
- v3 *= PRIME64_1;
- h64 ^= v3;
- h64 = h64 * PRIME64_1 + PRIME64_4;
-
- v4 *= PRIME64_2;
- v4 = XXH_rotl64(v4, 31);
- v4 *= PRIME64_1;
- h64 ^= v4;
- h64 = h64 * PRIME64_1 + PRIME64_4;
- }
- else
- {
- h64 = seed + PRIME64_5;
- }
+ /*====== Hash streaming ======*/
 
- h64 += (U64) len;
-
- while (p+8<=bEnd)
- {
- U64 k1 = XXH_get64bits(p);
- k1 *= PRIME64_2;
- k1 = XXH_rotl64(k1,31);
- k1 *= PRIME64_1;
- h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
- p+=8;
- }
-
- if (p+4<=bEnd)
- {
- h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
- h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
- p+=4;
- }
-
- while (p<bEnd)
- {
- h64 ^= (*p) * PRIME64_5;
- h64 = XXH_rotl64(h64, 11) * PRIME64_1;
- p++;
- }
-
- h64 ^= h64 >> 33;
- h64 *= PRIME64_2;
- h64 ^= h64 >> 29;
- h64 *= PRIME64_3;
- h64 ^= h64 >> 32;
-
- return h64;
- }
-
-
- unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
  {
- #if 0
- // Simple version, good for code maintenance, but unfortunately slow for small inputs
- XXH64_state_t state;
- XXH64_reset(&state, seed);
- XXH64_update(&state, input, len);
- return XXH64_digest(&state);
- #else
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- # if !defined(XXH_USE_UNALIGNED_ACCESS)
- if ((((size_t)input) & 7)==0) // Input is aligned, let's leverage the speed advantage
- {
- if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
- else
- return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
- }
- # endif
-
- if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
- else
- return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
- #endif
- }
-
- /****************************************************
- * Advanced Hash Functions
- ****************************************************/
-
- /*** Allocation ***/
- typedef struct
- {
- U64 total_len;
- U32 seed;
- U32 v1;
- U32 v2;
- U32 v3;
- U32 v4;
- U32 mem32[4]; /* defined as U32 for alignment */
- U32 memsize;
- } XXH_istate32_t;
-
- typedef struct
- {
- U64 total_len;
- U64 seed;
- U64 v1;
- U64 v2;
- U64 v3;
- U64 v4;
- U64 mem64[4]; /* defined as U64 for alignment */
- U32 memsize;
- } XXH_istate64_t;
-
-
- XXH32_state_t* XXH32_createState(void)
- {
- XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); // A compilation error here means XXH32_state_t is not large enough
  return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
  }
- XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
  {
  XXH_free(statePtr);
  return XXH_OK;
  }
 
- XXH64_state_t* XXH64_createState(void)
+ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
  {
- XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); // A compilation error here means XXH64_state_t is not large enough
- return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
- }
- XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
- {
- XXH_free(statePtr);
- return XXH_OK;
- }
-
-
- /*** Hash feed ***/
-
- XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
- {
- XXH_istate32_t* state = (XXH_istate32_t*) state_in;
- state->seed = seed;
- state->v1 = seed + PRIME32_1 + PRIME32_2;
- state->v2 = seed + PRIME32_2;
- state->v3 = seed + 0;
- state->v4 = seed - PRIME32_1;
- state->total_len = 0;
- state->memsize = 0;
- return XXH_OK;
+ memcpy(dstState, srcState, sizeof(*dstState));
  }
 
- XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
+ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
  {
- XXH_istate64_t* state = (XXH_istate64_t*) state_in;
- state->seed = seed;
- state->v1 = seed + PRIME64_1 + PRIME64_2;
- state->v2 = seed + PRIME64_2;
- state->v3 = seed + 0;
- state->v4 = seed - PRIME64_1;
- state->total_len = 0;
- state->memsize = 0;
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ memcpy(statePtr, &state, sizeof(state));
  return XXH_OK;
  }
 
 
- FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
+ FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
  {
- XXH_istate32_t* state = (XXH_istate32_t *) state_in;
  const BYTE* p = (const BYTE*)input;
  const BYTE* const bEnd = p + len;
 
@@ -588,69 +385,40 @@ FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const v
  if (input==NULL) return XXH_ERROR;
  #endif
 
- state->total_len += len;
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);
 
- if (state->memsize + len < 16) // fill in tmp buffer
- {
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
  XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
- state->memsize += (U32)len;
+ state->memsize += (unsigned)len;
  return XXH_OK;
  }
 
- if (state->memsize) // some data left from previous update
- {
+ if (state->memsize) { /* some data left from previous update */
  XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
- {
- const U32* p32 = state->mem32;
- state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
- state->v1 = XXH_rotl32(state->v1, 13);
- state->v1 *= PRIME32_1;
- p32++;
- state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
- state->v2 = XXH_rotl32(state->v2, 13);
- state->v2 *= PRIME32_1;
- p32++;
- state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
- state->v3 = XXH_rotl32(state->v3, 13);
- state->v3 *= PRIME32_1;
- p32++;
- state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
- state->v4 = XXH_rotl32(state->v4, 13);
- state->v4 *= PRIME32_1;
- p32++;
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
  }
  p += 16-state->memsize;
  state->memsize = 0;
  }
 
- if (p <= bEnd-16)
- {
+ if (p <= bEnd-16) {
  const BYTE* const limit = bEnd - 16;
  U32 v1 = state->v1;
  U32 v2 = state->v2;
  U32 v3 = state->v3;
  U32 v4 = state->v4;
 
- do
- {
- v1 += XXH_readLE32(p, endian) * PRIME32_2;
- v1 = XXH_rotl32(v1, 13);
- v1 *= PRIME32_1;
- p+=4;
- v2 += XXH_readLE32(p, endian) * PRIME32_2;
- v2 = XXH_rotl32(v2, 13);
- v2 *= PRIME32_1;
- p+=4;
- v3 += XXH_readLE32(p, endian) * PRIME32_2;
- v3 = XXH_rotl32(v3, 13);
- v3 *= PRIME32_1;
- p+=4;
- v4 += XXH_readLE32(p, endian) * PRIME32_2;
- v4 = XXH_rotl32(v4, 13);
- v4 *= PRIME32_1;
- p+=4;
- }
- while (p<=limit);
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
 
  state->v1 = v1;
  state->v2 = v2;
@@ -658,16 +426,15 @@ FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const v
  state->v4 = v4;
  }
 
- if (p < bEnd)
- {
+ if (p < bEnd) {
  XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
- state->memsize = (U32)(bEnd-p);
+ state->memsize = (unsigned)(bEnd-p);
  }
 
  return XXH_OK;
  }
 
- XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
  {
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
 
@@ -679,35 +446,29 @@ XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t l
 
 
 
- FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
+ FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
  {
- XXH_istate32_t* state = (XXH_istate32_t*) state_in;
  const BYTE * p = (const BYTE*)state->mem32;
- BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
+ const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
  U32 h32;
 
- if (state->total_len >= 16)
- {
+ if (state->large_len) {
  h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
- }
- else
- {
- h32 = state->seed + PRIME32_5;
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
  }
 
- h32 += (U32) state->total_len;
+ h32 += state->total_len_32;
 
- while (p+4<=bEnd)
- {
+ while (p+4<=bEnd) {
  h32 += XXH_readLE32(p, endian) * PRIME32_3;
  h32 = XXH_rotl32(h32, 17) * PRIME32_4;
  p+=4;
  }
 
- while (p<bEnd)
- {
+ while (p<bEnd) {
  h32 += (*p) * PRIME32_5;
- h32 = XXH_rotl32(h32, 11) * PRIME32_1;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1;
  p++;
  }
 
@@ -721,7 +482,7 @@ FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endiane
  }
 
 
- U32 XXH32_digest (const XXH32_state_t* state_in)
+ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
  {
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
 
@@ -732,9 +493,261 @@ U32 XXH32_digest (const XXH32_state_t* state_in)
  }
 
 
- FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
+ /*====== Canonical representation ======*/
+
+ /*! Default XXH result types are basic unsigned 32 and 64 bits.
+ * The canonical representation follows human-readable write convention, aka big-endian (large digits first).
+ * These functions allow transformation of hash result into and from its canonical format.
+ * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+ */
+
+ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+ {
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+ }
+
+ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+ {
+ return XXH_readBE32(src);
+ }
+
+
+ #ifndef XXH_NO_LONG_LONG
+
+ /* *******************************************************************
+ * 64-bits hash functions
+ *********************************************************************/
+
+ /*====== Memory access ======*/
+
+ #ifndef MEM_MODULE
+ # define MEM_MODULE
+ # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ # include <stdint.h>
+ typedef uint64_t U64;
+ # else
+ typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+ # endif
+ #endif
+
+
+ #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+ /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+ static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+ #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+ /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+ /* currently only defined for gcc and icc */
+ typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+
+ static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+ #else
+
+ /* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+ static U64 XXH_read64(const void* memPtr)
+ {
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+ }
+
+ #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+ #if defined(_MSC_VER) /* Visual Studio */
+ # define XXH_swap64 _byteswap_uint64
+ #elif GCC_VERSION >= 403
+ # define XXH_swap64 __builtin_bswap64
+ #else
+ static U64 XXH_swap64 (U64 x)
+ {
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+ }
+ #endif
+
+ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+ {
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+ }
+
+ FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+ {
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+ }
+
+ static U64 XXH_readBE64(const void* ptr)
+ {
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+ }
+
+
+ /*====== xxh64 ======*/
+
+ static const U64 PRIME64_1 = 11400714785074694791ULL;
+ static const U64 PRIME64_2 = 14029467366897019727ULL;
+ static const U64 PRIME64_3 = 1609587929392839161ULL;
+ static const U64 PRIME64_4 = 9650029242287828579ULL;
+ static const U64 PRIME64_5 = 2870177450012600261ULL;
+
+ static U64 XXH64_round(U64 acc, U64 input)
+ {
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+ }
+
+ static U64 XXH64_mergeRound(U64 acc, U64 val)
+ {
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+ }
+
+ FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+ {
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+ U64 h64;
+ #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+ #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)32;
+ }
+ #endif
+
+ if (len>=32) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+ } while (p<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64) len;
+
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+ h64 ^= k1;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ p+=8;
+ }
+
+ if (p+4<=bEnd) {
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+ }
+
+
+ XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+ {
+ #if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_CREATESTATE_STATIC(state);
+ XXH64_reset(state, seed);
+ XXH64_update(state, input, len);
+ return XXH64_digest(state);
+ #else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+ #endif
+ }
+
+ /*====== Hash Streaming ======*/
+
+ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+ {
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+ }
+ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+ {
+ XXH_free(statePtr);
+ return XXH_OK;
+ }
+
+ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+ {
+ memcpy(dstState, srcState, sizeof(*dstState));
+ }
+
+ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+ {
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ memcpy(statePtr, &state, sizeof(state));
+ return XXH_OK;
+ }
+
+ FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
  {
- XXH_istate64_t * state = (XXH_istate64_t *) state_in;
  const BYTE* p = (const BYTE*)input;
  const BYTE* const bEnd = p + len;
 
@@ -744,67 +757,35 @@ FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const v
 
  state->total_len += len;
 
- if (state->memsize + len < 32) // fill in tmp buffer
- {
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
  XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
  state->memsize += (U32)len;
  return XXH_OK;
  }
 
- if (state->memsize) // some data left from previous update
- {
+ if (state->memsize) { /* tmp buffer is full */
  XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
- {
- const U64* p64 = state->mem64;
- state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
- state->v1 = XXH_rotl64(state->v1, 31);
- state->v1 *= PRIME64_1;
- p64++;
- state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
- state->v2 = XXH_rotl64(state->v2, 31);
- state->v2 *= PRIME64_1;
- p64++;
- state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
- state->v3 = XXH_rotl64(state->v3, 31);
- state->v3 *= PRIME64_1;
- p64++;
- state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
- state->v4 = XXH_rotl64(state->v4, 31);
- state->v4 *= PRIME64_1;
- p64++;
- }
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
  p += 32-state->memsize;
  state->memsize = 0;
  }
 
- if (p+32 <= bEnd)
- {
+ if (p+32 <= bEnd) {
  const BYTE* const limit = bEnd - 32;
  U64 v1 = state->v1;
  U64 v2 = state->v2;
  U64 v3 = state->v3;
  U64 v4 = state->v4;
 
- do
- {
- v1 += XXH_readLE64(p, endian) * PRIME64_2;
- v1 = XXH_rotl64(v1, 31);
- v1 *= PRIME64_1;
- p+=8;
- v2 += XXH_readLE64(p, endian) * PRIME64_2;
- v2 = XXH_rotl64(v2, 31);
- v2 *= PRIME64_1;
- p+=8;
- v3 += XXH_readLE64(p, endian) * PRIME64_2;
- v3 = XXH_rotl64(v3, 31);
- v3 *= PRIME64_1;
- p+=8;
- v4 += XXH_readLE64(p, endian) * PRIME64_2;
- v4 = XXH_rotl64(v4, 31);
- v4 *= PRIME64_1;
- p+=8;
- }
- while (p<=limit);
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
 
  state->v1 = v1;
  state->v2 = v2;
@@ -812,16 +793,15 @@ FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const v
  state->v4 = v4;
  }
 
- if (p < bEnd)
- {
+ if (p < bEnd) {
  XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
- state->memsize = (U32)(bEnd-p);
+ state->memsize = (unsigned)(bEnd-p);
  }
 
  return XXH_OK;
  }
 
- XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
  {
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
 
@@ -831,77 +811,45 @@ XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t l
  return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
  }
 
-
-
- FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
+ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
  {
- XXH_istate64_t * state = (XXH_istate64_t *) state_in;
  const BYTE * p = (const BYTE*)state->mem64;
- BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
+ const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
  U64 h64;
 
- if (state->total_len >= 32)
- {
- U64 v1 = state->v1;
- U64 v2 = state->v2;
- U64 v3 = state->v3;
- U64 v4 = state->v4;
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
 
  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-
- v1 *= PRIME64_2;
- v1 = XXH_rotl64(v1, 31);
- v1 *= PRIME64_1;
- h64 ^= v1;
- h64 = h64*PRIME64_1 + PRIME64_4;
-
- v2 *= PRIME64_2;
- v2 = XXH_rotl64(v2, 31);
- v2 *= PRIME64_1;
- h64 ^= v2;
- h64 = h64*PRIME64_1 + PRIME64_4;
-
- v3 *= PRIME64_2;
- v3 = XXH_rotl64(v3, 31);
- v3 *= PRIME64_1;
- h64 ^= v3;
- h64 = h64*PRIME64_1 + PRIME64_4;
-
- v4 *= PRIME64_2;
- v4 = XXH_rotl64(v4, 31);
- v4 *= PRIME64_1;
- h64 ^= v4;
- h64 = h64*PRIME64_1 + PRIME64_4;
- }
- else
- {
- h64 = state->seed + PRIME64_5;
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 + PRIME64_5;
  }
 
  h64 += (U64) state->total_len;
 
- while (p+8<=bEnd)
- {
- U64 k1 = XXH_readLE64(p, endian);
- k1 *= PRIME64_2;
- k1 = XXH_rotl64(k1,31);
- k1 *= PRIME64_1;
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
  h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
  p+=8;
  }
 
- if (p+4<=bEnd)
- {
+ if (p+4<=bEnd) {
  h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
- h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
  p+=4;
  }
 
- while (p<bEnd)
- {
+ while (p<bEnd) {
  h64 ^= (*p) * PRIME64_5;
- h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
  p++;
  }
 
@@ -914,8 +862,7 @@ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endiane
  return h64;
  }
 
-
- unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+ XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
  {
  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
 
@@ -925,3 +872,19 @@ unsigned long long XXH64_digest (const XXH64_state_t* state_in)
  return XXH64_digest_endian(state_in, XXH_bigEndian);
  }
 
+
+ /*====== Canonical representation ======*/
+
+ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+ {
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+ }
+
+ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+ {
+ return XXH_readBE64(src);
+ }
+
+ #endif /* XXH_NO_LONG_LONG */
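The `XXH32_canonicalFromHash`/`XXH64_canonicalFromHash` helpers added above fix a big-endian byte order for hashes written to disk, so stored values stay comparable across platforms. The gem does not expose these C functions, but the same convention is easy to illustrate with Ruby's standard `pack` directives:

```ruby
require 'xxhash'

h32 = XXhash.xxh32('test', 123)
canonical = [h32].pack('N')           # 4 bytes, big-endian, like XXH32_canonicalFromHash
canonical.unpack('N').first == h32    # => true: the XXH32_hashFromCanonical round-trip

h64 = XXhash.xxh64('test', 123)
[h64].pack('Q>').unpack('Q>').first == h64  # => true, same convention for 64-bit hashes
```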