extlz4 0.2.5 → 0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/HISTORY.ja.md +9 -1
- data/README.md +44 -41
- data/contrib/lz4/NEWS +33 -0
- data/contrib/lz4/README.md +30 -24
- data/contrib/lz4/lib/README.md +59 -10
- data/contrib/lz4/lib/lz4.c +1303 -583
- data/contrib/lz4/lib/lz4.h +376 -176
- data/contrib/lz4/lib/lz4frame.c +447 -286
- data/contrib/lz4/lib/lz4frame.h +289 -74
- data/contrib/lz4/lib/lz4frame_static.h +4 -111
- data/contrib/lz4/lib/lz4hc.c +789 -207
- data/contrib/lz4/lib/lz4hc.h +256 -93
- data/contrib/lz4/lib/xxhash.c +376 -240
- data/contrib/lz4/lib/xxhash.h +128 -93
- data/ext/blockapi.c +2 -2
- data/ext/lz4_amalgam.c +0 -23
- data/gemstub.rb +4 -4
- data/lib/extlz4.rb +46 -0
- data/lib/extlz4/version.rb +1 -1
- metadata +33 -10
- data/contrib/lz4/circle.yml +0 -38
- data/contrib/lz4/lib/lz4opt.h +0 -356
data/contrib/lz4/lib/lz4hc.h
CHANGED
@@ -54,7 +54,7 @@ extern "C" {
  *  Block Compression
  **************************************/
 /*! LZ4_compress_HC() :
- *  Compress data from `src` into `dst`, using the
+ *  Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
  *  `dst` must be already allocated.
  *  Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
  *  Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
@@ -77,7 +77,21 @@ LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dst
  *  Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
  */
 LZ4LIB_API int LZ4_sizeofStateHC(void);
-LZ4LIB_API int LZ4_compress_HC_extStateHC(void*
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
+
+
+/*! LZ4_compress_HC_destSize() : v1.9.0+
+ *  Will compress as much data as possible from `src`
+ *  to fit into `targetDstSize` budget.
+ *  Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ *           or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
+                                        const char* src, char* dst,
+                                        int* srcSizePtr, int targetDstSize,
+                                        int compressionLevel);
 
 
 /*-************************************
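For orientation, a minimal caller of the new `LZ4_compress_HC_destSize()` entry point added above — this sketch is not part of the diff. It assumes the external-state pattern documented in this header (the state buffer comes from `malloc(LZ4_sizeofStateHC())`, whose alignment the header says is sufficient); buffer sizes are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>
#include "lz4hc.h"

int main(void)
{
    const char src[] = "example input which may be larger than the output budget...";
    char dst[32];                           /* fixed, possibly too-small output budget */
    int srcSize = (int)(sizeof(src) - 1);   /* in: bytes available; out: bytes actually consumed */

    void* stateHC = malloc(LZ4_sizeofStateHC());   /* malloc() alignment is sufficient */
    if (stateHC == NULL) return 1;

    int written = LZ4_compress_HC_destSize(stateHC, src, dst, &srcSize,
                                           (int)sizeof(dst), LZ4HC_CLEVEL_DEFAULT);
    if (written > 0)
        printf("consumed %d input bytes, wrote %d compressed bytes\n", srcSize, written);

    free(stateHC);
    return 0;
}
```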
@@ -89,46 +103,92 @@ LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* ds
 /*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
  *  These functions create and release memory for LZ4 HC streaming state.
  *  Newly created states are automatically initialized.
- *
- *
+ *  A same state can be used multiple times consecutively,
+ *  starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
  */
 LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
 LZ4LIB_API int             LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
 
-LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
-LZ4LIB_API int  LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
-
-LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, const char* src, char* dst, int srcSize, int maxDstSize);
-
-LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
-
 /*
-  These functions compress data in successive blocks of any size,
+  These functions compress data in successive blocks of any size,
+  using previous blocks as dictionary, to improve compression ratio.
   One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
   There is an exception for ring buffers, which can be smaller than 64 KB.
-  Ring
+  Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+  Before starting compression, state must be allocated and properly initialized.
+  LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+  Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+  or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+  LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+  which is automatically the case when state is created using LZ4_createStreamHC().
+
+  After reset, a first "fictional block" can be designated as initial dictionary,
+  using LZ4_loadDictHC() (Optional).
+
+  Invoke LZ4_compress_HC_continue() to compress each successive block.
+  The number of blocks is unlimited.
+  Previous input blocks, including initial dictionary when present,
+  must remain accessible and unmodified during compression.
+
+  It's allowed to update compression level anytime between blocks,
+  using LZ4_setCompressionLevel() (experimental).
+
+  'dst' buffer should be sized to handle worst case scenarios
+  (see LZ4_compressBound(), it ensures compression success).
+  In case of failure, the API does not guarantee recovery,
+  so the state _must_ be reset.
+  To ensure compression success
+  whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+  consider using LZ4_compress_HC_continue_destSize().
+
+  Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+  it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+  Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB)
+
+  After completing a streaming compression,
+  it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+  just by resetting it, using LZ4_resetStreamHC_fast().
+*/
 
-
-
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel);   /* v1.9.0+ */
+LZ4LIB_API int  LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
 
-
-
-
-  Because in case of failure, the API does not guarantee context recovery, and context will have to be reset.
-  If `dst` buffer budget cannot be >= LZ4_compressBound(), consider using LZ4_compress_HC_continue_destSize() instead.
+LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
+                                         const char* src, char* dst,
+                                         int srcSize, int maxDstSize);
 
-
-
-
-
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ *  Similar to LZ4_compress_HC_continue(),
+ *  but will read as much data as possible from `src`
+ *  to fit into `targetDstSize` budget.
+ *  Result is provided into 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ *           or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`.
+ *           Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
+                                                 const char* src, char* dst,
+                                                 int* srcSizePtr, int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
 
 
-
+
+/*^**********************************************
+ * !!!!!!   STATIC LINKING ONLY   !!!!!!
+ ***********************************************/
+
+/*-******************************************************************
  *  PRIVATE DEFINITIONS :
- *  Do not use these definitions.
- *  They are exposed to allow static allocation of `LZ4_streamHC_t`.
- *
-
+ *  Do not use these definitions directly.
+ *  They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ *  Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ *  Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
 #define LZ4HC_DICTIONARY_LOGSIZE 16
 #define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
 #define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
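The streaming workflow described in the rewritten comment above condenses to a short sketch (illustrative, not part of the diff; `outCap` is assumed to be at least `LZ4_compressBound()` of each block so compression cannot fail):

```c
#include "lz4hc.h"

/* Compress two consecutive blocks so the second can reference the first. */
int compress_two_blocks(const char* b1, int n1, const char* b2, int n2,
                        char* out, int outCap)
{
    LZ4_streamHC_t* s = LZ4_createStreamHC();          /* allocated and initialized */
    if (s == NULL) return -1;
    LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_DEFAULT);   /* start a new stream of blocks */

    int c1 = LZ4_compress_HC_continue(s, b1, out, n1, outCap);
    if (c1 <= 0) { LZ4_freeStreamHC(s); return -1; }

    /* b1 must remain readable and unmodified while b2 is being compressed */
    int c2 = LZ4_compress_HC_continue(s, b2, out + c1, n2, outCap - c1);
    LZ4_freeStreamHC(s);
    return (c2 > 0) ? (c1 + c2) : -1;
}
```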
@@ -141,52 +201,75 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
 #include <stdint.h>
 
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
 {
     uint32_t   hashTable[LZ4HC_HASHTABLESIZE];
     uint16_t   chainTable[LZ4HC_MAXD];
     const uint8_t* end;        /* next block here to continue on current prefix */
     const uint8_t* base;       /* All index relative to this position */
     const uint8_t* dictBase;   /* alternate base for extDict */
-    uint8_t* inputBuffer;      /* deprecated */
     uint32_t   dictLimit;      /* below that point, need extDict */
     uint32_t   lowLimit;       /* below that point, no more dict */
     uint32_t   nextToUpdate;   /* index from which to continue dictionary update */
-
-
+    short      compressionLevel;
+    int8_t     favorDecSpeed;  /* favor decompression speed if this flag set,
+                                  otherwise, favor compression ratio */
+    int8_t     dirty;          /* stream has to be fully reset if this flag is set */
+    const LZ4HC_CCtx_internal* dictCtx;
+};
 
 #else
 
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
 {
     unsigned int   hashTable[LZ4HC_HASHTABLESIZE];
     unsigned short chainTable[LZ4HC_MAXD];
     const unsigned char* end;        /* next block here to continue on current prefix */
     const unsigned char* base;       /* All index relative to this position */
     const unsigned char* dictBase;   /* alternate base for extDict */
-    unsigned char* inputBuffer;      /* deprecated */
     unsigned int   dictLimit;        /* below that point, need extDict */
     unsigned int   lowLimit;         /* below that point, no more dict */
     unsigned int   nextToUpdate;     /* index from which to continue dictionary update */
-
-
+    short          compressionLevel;
+    char           favorDecSpeed;   /* favor decompression speed if this flag set,
+                                       otherwise, favor compression ratio */
+    char           dirty;           /* stream has to be fully reset if this flag is set */
+    const LZ4HC_CCtx_internal* dictCtx;
+};
 
 #endif
 
-
+
+/* Do not use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+ */
+#define LZ4_STREAMHCSIZE       (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56 + ((sizeof(void*)==16) ? 56 : 0) /* AS400*/ ) /* 262200 or 262256*/
 #define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
 union LZ4_streamHC_u {
     size_t table[LZ4_STREAMHCSIZE_SIZET];
     LZ4HC_CCtx_internal internal_donotuse;
-};
-
-
-
-
+};   /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically, on stack, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
 
-
-
-
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
 
 
 /*-************************************
|
@@ -195,25 +278,43 @@ union LZ4_streamHC_u {
|
|
195
278
|
/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
|
196
279
|
|
197
280
|
/* deprecated compression functions */
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
/*
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
281
|
+
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
|
282
|
+
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
|
283
|
+
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
|
284
|
+
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
|
285
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
|
286
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
|
287
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
|
288
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
|
289
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
|
290
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
|
291
|
+
|
292
|
+
/* Obsolete streaming functions; degraded functionality; do not use!
|
293
|
+
*
|
294
|
+
* In order to perform streaming compression, these functions depended on data
|
295
|
+
* that is no longer tracked in the state. They have been preserved as well as
|
296
|
+
* possible: using them will still produce a correct output. However, use of
|
297
|
+
* LZ4_slideInputBufferHC() will truncate the history of the stream, rather
|
298
|
+
* than preserve a window-sized chunk of history.
|
299
|
+
*/
|
300
|
+
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
|
301
|
+
LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
|
302
|
+
LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
|
303
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
|
304
|
+
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
|
305
|
+
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
|
306
|
+
LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
|
307
|
+
|
308
|
+
|
309
|
+
/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
|
310
|
+
* The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
|
311
|
+
* which is now the recommended function to start a new stream of blocks,
|
312
|
+
* but cannot be used to initialize a memory segment containing arbitrary garbage data.
|
313
|
+
*
|
314
|
+
* It is recommended to switch to LZ4_initStreamHC().
|
315
|
+
* LZ4_resetStreamHC() will generate deprecation warnings in a future version.
|
316
|
+
*/
|
317
|
+
LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
|
217
318
|
|
218
319
|
|
219
320
|
#if defined (__cplusplus)
|
@@ -235,38 +336,100 @@ LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStr
 #ifndef LZ4_HC_SLO_098092834
 #define LZ4_HC_SLO_098092834
 
-
-
-
- *  Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst'
- *           or 0 if compression fails.
- * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src`
- */
-int LZ4_compress_HC_destSize(void* LZ4HC_Data,
-                             const char* src, char* dst,
-                             int* srcSizePtr, int targetDstSize,
-                             int compressionLevel);
-
-/*! LZ4_compress_HC_continue_destSize() : v1.8.0 (experimental)
- *  Similar as LZ4_compress_HC_continue(),
- *  but will read a variable nb of bytes from `src`
- *  to fit into `targetDstSize` budget.
- *  Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst'
- *           or 0 if compression fails.
- * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src`.
- */
-int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
-                                      const char* src, char* dst,
-                                      int* srcSizePtr, int targetDstSize);
+#if defined (__cplusplus)
+extern "C" {
+#endif
 
-/*! LZ4_setCompressionLevel() : v1.8.0 (experimental)
- *  It's possible to change compression level
+/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
+ *  It's possible to change compression level
+ *  between successive invocations of LZ4_compress_HC_continue*()
+ *  for dynamic adaptation.
  */
-void LZ4_setCompressionLevel(
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
 
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ *  Opt. Parser will favor decompression speed over compression ratio.
+ *  Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
+ */
+LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ *  When an LZ4_streamHC_t is known to be in a internally coherent state,
+ *  it can often be prepared for a new compression with almost no work, only
+ *  sometimes falling back to the full, expensive reset that is always required
+ *  when the stream is in an indeterminate state (i.e., the reset performed by
+ *  LZ4_resetStreamHC()).
+ *
+ *  LZ4_streamHCs are guaranteed to be in a valid state when:
+ *  - returned from LZ4_createStreamHC()
+ *  - reset by LZ4_resetStreamHC()
+ *  - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ *  - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ *  - the stream was in a valid state and was then used in any compression call
+ *    that returned success
+ *  - the stream was in an indeterminate state and was used in a compression
+ *    call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ *    returned success
+ *
+ *  Note:
+ *  A stream that was last used in a compression call that returned an error
+ *  may be passed to this function. However, it will be fully reset, which will
+ *  clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ *  A variant of LZ4_compress_HC_extStateHC().
+ *
+ *  Using this variant avoids an expensive initialization step. It is only safe
+ *  to call if the state buffer is known to be correctly initialized already
+ *  (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ *  "correctly initialized"). From a high level, the difference is that this
+ *  function initializes the provided state with a call to
+ *  LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ *  call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
+    void* state,
+    const char* src, char* dst,
+    int srcSize, int dstCapacity,
+    int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ *  This is an experimental API that allows for the efficient use of a
+ *  static dictionary many times.
+ *
+ *  Rather than re-loading the dictionary buffer into a working context before
+ *  each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ *  working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ *  in which the working stream references the dictionary stream in-place.
+ *
+ *  Several assumptions are made about the state of the dictionary stream.
+ *  Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ *  be expected to work.
+ *
+ *  Alternatively, the provided dictionary stream pointer may be NULL, in which
+ *  case any existing dictionary stream is unset.
+ *
+ *  A dictionary should only be attached to a stream without any history (i.e.,
+ *  a stream that has just been reset).
+ *
+ *  The dictionary will remain attached to the working stream only for the
+ *  current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ *  dictionary context association from the working stream. The dictionary
+ *  stream (and source buffer) must remain in-place / accessible / unchanged
+ *  through the lifetime of the stream session.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
+    LZ4_streamHC_t *working_stream,
+    const LZ4_streamHC_t *dictionary_stream);
 
+#if defined (__cplusplus)
+}
+#endif
 
 #endif /* LZ4_HC_SLO_098092834 */
 #endif /* LZ4_HC_STATIC_LINKING_ONLY */
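A sketch of the dictionary-attachment flow described above (illustrative only; it assumes `dictStream` was prepared once with `LZ4_loadDictHC()` and that both it and its source buffer outlive the stream session, per the comment's requirements):

```c
#define LZ4_HC_STATIC_LINKING_ONLY   /* LZ4_attach_HC_dictionary() is static-linking-only */
#include "lz4hc.h"

int compress_with_shared_dict(const LZ4_streamHC_t* dictStream,
                              LZ4_streamHC_t* work,
                              const char* src, char* dst,
                              int srcSize, int dstCapacity)
{
    LZ4_resetStreamHC_fast(work, LZ4HC_CLEVEL_DEFAULT); /* stream without history */
    LZ4_attach_HC_dictionary(work, dictStream);         /* no-copy reference */
    return LZ4_compress_HC_continue(work, src, dst, srcSize, dstCapacity);
}
```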
data/contrib/lz4/lib/xxhash.c
CHANGED
@@ -50,20 +50,26 @@
 * Prefer these methods in priority order (0 > 1 > 2)
 */
 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__)
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+                        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+                        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #    define XXH_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+                       || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+                       || defined(__ARM_ARCH_7S__) ))
 #    define XXH_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
 
 /*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If
- * When this
- *
+ * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks input for null pointer.
+ * It it is, result for null input pointers is the same as a null-length input.
 */
-
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
+#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
 
 /*!XXH_FORCE_NATIVE_FORMAT :
 * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
@@ -80,8 +86,9 @@
 /*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
-* The check costs one initial branch per hash;
-* is guaranteed to be aligned
+* The check costs one initial branch per hash;
+* set it to 0 when the input is guaranteed to be aligned,
+* or when alignment doesn't matter for performance.
 */
 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
 #  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
@@ -104,6 +111,8 @@ static void XXH_free (void* p) { free(p); }
 #include <string.h>
 static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
 
+#include <assert.h>   /* assert */
+
 #define XXH_STATIC_LINKING_ONLY
 #include "xxhash.h"
 
@@ -113,40 +122,35 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
 ***************************************/
 #ifdef _MSC_VER    /* Visual Studio */
 #  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
-#
-
-#
-#
-#
-#  else
-#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#      ifdef __GNUC__
-#        define XXH_FORCE_INLINE static inline __attribute__((always_inline))
-#      else
-#        define XXH_FORCE_INLINE static inline
-#      endif
+#  define FORCE_INLINE static __forceinline
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
 #    else
-#      define
-#    endif
-#
-#
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
 
 
 /* *************************************
 *  Basic Types
 ***************************************/
 #ifndef MEM_MODULE
-# if !defined (__VMS)
+# if !defined (__VMS) \
+   && (defined (__cplusplus) \
+   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
 #   include <stdint.h>
     typedef uint8_t  BYTE;
     typedef uint16_t U16;
     typedef uint32_t U32;
-    typedef int32_t  S32;
 # else
     typedef unsigned char      BYTE;
     typedef unsigned short     U16;
     typedef unsigned int       U32;
-    typedef signed int         S32;
 # endif
 #endif
 
@@ -213,8 +217,12 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
 
 /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
 #ifndef XXH_CPU_LITTLE_ENDIAN
-
-
+static int XXH_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
+    return one.c[0];
+}
+#  define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
 #endif
 
 
@@ -223,7 +231,7 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
 *****************************/
 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
 
-
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
 {
     if (align==XXH_unaligned)
         return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
@@ -231,7 +239,7 @@ XXH_FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, X
         return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
 }
 
-
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
 {
     return XXH_readLE32_align(ptr, endian, XXH_unaligned);
 }
@@ -245,12 +253,12 @@ static U32 XXH_readBE32(const void* ptr)
 /* *************************************
 *  Macros
 ***************************************/
-#define XXH_STATIC_ASSERT(c)
+#define XXH_STATIC_ASSERT(c)   { enum { XXH_sa = 1/(int)(!!(c)) }; }  /* use after variable declarations */
 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
 
 
 /* *******************************************************************
-*  32-
+*  32-bit hash functions
 *********************************************************************/
 static const U32 PRIME32_1 = 2654435761U;
 static const U32 PRIME32_2 = 2246822519U;
@@ -266,14 +274,89 @@ static U32 XXH32_round(U32 seed, U32 input)
     return seed;
 }
 
-
+/* mix all bits */
+static U32 XXH32_avalanche(U32 h32)
+{
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+    return(h32);
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+static U32
+XXH32_finalize(U32 h32, const void* ptr, size_t len,
+               XXH_endianess endian, XXH_alignment align)
+
+{
+    const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1               \
+    h32 += (*p++) * PRIME32_5; \
+    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+
+#define PROCESS4                         \
+    h32 += XXH_get32bits(p) * PRIME32_3; \
+    p+=4;                                \
+    h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
+
+    switch(len&15)  /* or switch(bEnd - p) */
+    {
+      case 12:      PROCESS4;
+                    /* fallthrough */
+      case 8:       PROCESS4;
+                    /* fallthrough */
+      case 4:       PROCESS4;
+                    return XXH32_avalanche(h32);
+
+      case 13:      PROCESS4;
+                    /* fallthrough */
+      case 9:       PROCESS4;
+                    /* fallthrough */
+      case 5:       PROCESS4;
+                    PROCESS1;
+                    return XXH32_avalanche(h32);
+
+      case 14:      PROCESS4;
+                    /* fallthrough */
+      case 10:      PROCESS4;
+                    /* fallthrough */
+      case 6:       PROCESS4;
+                    PROCESS1;
+                    PROCESS1;
+                    return XXH32_avalanche(h32);
+
+      case 15:      PROCESS4;
+                    /* fallthrough */
+      case 11:      PROCESS4;
+                    /* fallthrough */
+      case 7:       PROCESS4;
+                    /* fallthrough */
+      case 3:       PROCESS1;
+                    /* fallthrough */
+      case 2:       PROCESS1;
+                    /* fallthrough */
+      case 1:       PROCESS1;
+                    /* fallthrough */
+      case 0:       return XXH32_avalanche(h32);
+    }
+    assert(0);
+    return h32;   /* reaching this point is deemed impossible */
+}
+
+
+FORCE_INLINE U32
+XXH32_endian_align(const void* input, size_t len, U32 seed,
+                   XXH_endianess endian, XXH_alignment align)
 {
     const BYTE* p = (const BYTE*)input;
     const BYTE* bEnd = p + len;
     U32 h32;
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
 
-#
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
     if (p==NULL) {
         len=0;
         bEnd=p=(const BYTE*)(size_t)16;
@@ -281,7 +364,7 @@ XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed,
 #endif
 
     if (len>=16) {
-        const BYTE* const limit = bEnd -
+        const BYTE* const limit = bEnd - 15;
         U32 v1 = seed + PRIME32_1 + PRIME32_2;
         U32 v2 = seed + PRIME32_2;
         U32 v3 = seed + 0;
@@ -292,34 +375,17 @@ XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed,
             v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
             v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
             v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
-        } while (p
+        } while (p < limit);
 
-        h32 = XXH_rotl32(v1, 1)
+        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
+            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
     } else {
         h32  = seed + PRIME32_5;
     }
 
-    h32 += (U32)
-
-    while (p+4<=bEnd) {
-        h32 += XXH_get32bits(p) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32  = XXH_rotl32(h32, 11) * PRIME32_1 ;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
+    h32 += (U32)len;
 
-    return h32;
+    return XXH32_finalize(h32, p, len&15, endian, align);
 }
 
 
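The refactor above only moves the tail processing into the new `XXH32_finalize()`; the public one-shot entry point is unchanged. For orientation, a minimal caller (illustrative, not part of the diff):

```c
#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    const char data[] = "xxhash";
    unsigned h = XXH32(data, sizeof(data) - 1, 0 /* seed */);
    printf("XXH32 = %08x\n", h);
    return 0;
}
```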
@@ -371,74 +437,81 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t
 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
 {
     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)
+    memset(&state, 0, sizeof(state));
     state.v1 = seed + PRIME32_1 + PRIME32_2;
     state.v2 = seed + PRIME32_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME32_1;
-
+    /* do not write into reserved, planned to be removed in a future version */
+    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
     return XXH_OK;
 }
 
 
-
+FORCE_INLINE XXH_errorcode
+XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
 {
-
-
-
-#
-
+    if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+        return XXH_OK;
+#else
+        return XXH_ERROR;
 #endif
 
-
-
+    {   const BYTE* p = (const BYTE*)input;
+        const BYTE* const bEnd = p + len;
 
-
-
-    state->memsize += (unsigned)len;
-    return XXH_OK;
-}
+        state->total_len_32 += (unsigned)len;
+        state->large_len |= (len>=16) | (state->total_len_32>=16);
 
-
-
-
-
-    state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
-    state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
-    state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
+            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+            state->memsize += (unsigned)len;
+            return XXH_OK;
         }
-        p += 16-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p <= bEnd-16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = state->v1;
-        U32 v2 = state->v2;
-        U32 v3 = state->v3;
-        U32 v4 = state->v4;
 
-
-
-
-
-
-
+        if (state->memsize) {   /* some data left from previous update */
+            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+            {   const U32* p32 = state->mem32;
+                state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+                state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+                state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+                state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
+            }
+            p += 16-state->memsize;
+            state->memsize = 0;
+        }
 
-
-
-
-
-
+        if (p <= bEnd-16) {
+            const BYTE* const limit = bEnd - 16;
+            U32 v1 = state->v1;
+            U32 v2 = state->v2;
+            U32 v3 = state->v3;
+            U32 v4 = state->v4;
+
+            do {
+                v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+                v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+                v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+                v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+            } while (p<=limit);
+
+            state->v1 = v1;
+            state->v2 = v2;
+            state->v3 = v3;
+            state->v4 = v4;
+        }
 
-
-
-
+        if (p < bEnd) {
+            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+            state->memsize = (unsigned)(bEnd-p);
+        }
     }
 
     return XXH_OK;
 }
 
+
 XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
 {
     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
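The streaming update path rewritten above is exercised through the public reset/update/digest API; a short sketch of that usage (illustrative, not part of the diff):

```c
#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    XXH32_state_t* st = XXH32_createState();
    if (st == NULL) return 1;

    XXH32_reset(st, 0 /* seed */);
    XXH32_update(st, "hello ", 6);   /* chunks of any size may be fed */
    XXH32_update(st, "world", 5);

    /* equal to the one-shot XXH32("hello world", 11, 0) */
    printf("%08x\n", XXH32_digest(st));

    XXH32_freeState(st);
    return 0;
}
```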
@@ -450,40 +523,23 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void*
 }
 
 
-
-
+FORCE_INLINE U32
+XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
 {
-    const BYTE * p = (const BYTE*)state->mem32;
-    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
     U32 h32;
 
     if (state->large_len) {
-        h32 = XXH_rotl32(state->v1, 1)
+        h32 = XXH_rotl32(state->v1, 1)
+            + XXH_rotl32(state->v2, 7)
+            + XXH_rotl32(state->v3, 12)
+            + XXH_rotl32(state->v4, 18);
     } else {
         h32 = state->v3 /* == seed */ + PRIME32_5;
     }
 
     h32 += state->total_len_32;
 
-
-        h32 += XXH_readLE32(p, endian) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
+    return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
 }
 
 
@@ -503,7 +559,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
 /*! Default XXH result types are basic unsigned 32 and 64 bits.
 *   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
 *   These functions allow transformation of hash result into and from its canonical format.
-*   This way, hash values can be written into a file or buffer,
+*   This way, hash values can be written into a file or buffer, remaining comparable across different systems.
 */
 
 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
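The canonical-format helpers referenced in this comment serialize a hash in a fixed big-endian layout; a round-trip sketch (illustrative, not part of the diff):

```c
#include <string.h>
#include "xxhash.h"

void store_hash(unsigned char out[4], XXH32_hash_t h)
{
    XXH32_canonical_t c;
    XXH32_canonicalFromHash(&c, h);   /* big-endian, portable layout */
    memcpy(out, c.digest, sizeof(c.digest));
}

XXH32_hash_t load_hash(const unsigned char in[4])
{
    XXH32_canonical_t c;
    memcpy(c.digest, in, sizeof(c.digest));
    return XXH32_hashFromCanonical(&c);
}
```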
@@ -522,18 +578,21 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
 #ifndef XXH_NO_LONG_LONG
 
 /* *******************************************************************
-*  64-
+*  64-bit hash functions
 *********************************************************************/
 
 /*====== Memory access ======*/
 
 #ifndef MEM_MODULE
 # define MEM_MODULE
-# if !defined (__VMS)
+# if !defined (__VMS) \
+   && (defined (__cplusplus) \
+   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
 #   include <stdint.h>
     typedef uint64_t U64;
 # else
-
+    /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
+    typedef unsigned long long U64;
 # endif
 #endif
 
@@ -583,7 +642,7 @@ static U64 XXH_swap64 (U64 x)
 }
 #endif
 
-
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
 {
     if (align==XXH_unaligned)
         return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
@@ -591,7 +650,7 @@ XXH_FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, X
         return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
 }
 
-
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
 {
     return XXH_readLE64_align(ptr, endian, XXH_unaligned);
 }
@@ -626,14 +685,137 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
     return acc;
 }
 
-
+static U64 XXH64_avalanche(U64 h64)
+{
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+    return h64;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+static U64
+XXH64_finalize(U64 h64, const void* ptr, size_t len,
+               XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1_64          \
+    h64 ^= (*p++) * PRIME64_5; \
+    h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+
+#define PROCESS4_64          \
+    h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+    p+=4;                    \
+    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+
+#define PROCESS8_64 {        \
+    U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
+    p+=8;                    \
+    h64 ^= k1;               \
+    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
+}
+
+    switch(len&31) {
+      case 24: PROCESS8_64;
+                    /* fallthrough */
+      case 16: PROCESS8_64;
+                    /* fallthrough */
+      case  8: PROCESS8_64;
+               return XXH64_avalanche(h64);
+
+      case 28: PROCESS8_64;
+                    /* fallthrough */
+      case 20: PROCESS8_64;
+                    /* fallthrough */
+      case 12: PROCESS8_64;
+                    /* fallthrough */
+      case  4: PROCESS4_64;
+               return XXH64_avalanche(h64);
+
+      case 25: PROCESS8_64;
+                    /* fallthrough */
+      case 17: PROCESS8_64;
+                    /* fallthrough */
+      case  9: PROCESS8_64;
+               PROCESS1_64;
+               return XXH64_avalanche(h64);
+
+      case 29: PROCESS8_64;
+                    /* fallthrough */
+      case 21: PROCESS8_64;
+                    /* fallthrough */
+      case 13: PROCESS8_64;
+                    /* fallthrough */
+      case  5: PROCESS4_64;
+               PROCESS1_64;
+               return XXH64_avalanche(h64);
+
+      case 26: PROCESS8_64;
+                    /* fallthrough */
+      case 18: PROCESS8_64;
+                    /* fallthrough */
+      case 10: PROCESS8_64;
+               PROCESS1_64;
+               PROCESS1_64;
+               return XXH64_avalanche(h64);
+
+      case 30: PROCESS8_64;
+                    /* fallthrough */
+      case 22: PROCESS8_64;
+                    /* fallthrough */
+      case 14: PROCESS8_64;
+                    /* fallthrough */
+      case  6: PROCESS4_64;
+               PROCESS1_64;
+               PROCESS1_64;
+               return XXH64_avalanche(h64);
+
+      case 27: PROCESS8_64;
+                    /* fallthrough */
+      case 19: PROCESS8_64;
+                    /* fallthrough */
+      case 11: PROCESS8_64;
+               PROCESS1_64;
+               PROCESS1_64;
+               PROCESS1_64;
+               return XXH64_avalanche(h64);
+
+      case 31: PROCESS8_64;
+                    /* fallthrough */
+      case 23: PROCESS8_64;
+                    /* fallthrough */
+      case 15: PROCESS8_64;
+                    /* fallthrough */
+      case  7: PROCESS4_64;
+                    /* fallthrough */
+      case  3: PROCESS1_64;
+                    /* fallthrough */
+      case  2: PROCESS1_64;
+                    /* fallthrough */
+      case  1: PROCESS1_64;
+                    /* fallthrough */
+      case  0: return XXH64_avalanche(h64);
+    }
+
+    /* impossible to reach */
+    assert(0);
+    return 0;  /* unreachable, but some compilers complain without it */
+}
+
+FORCE_INLINE U64
+XXH64_endian_align(const void* input, size_t len, U64 seed,
+                   XXH_endianess endian, XXH_alignment align)
 {
     const BYTE* p = (const BYTE*)input;
-    const BYTE*
+    const BYTE* bEnd = p + len;
     U64 h64;
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
 
-#
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
     if (p==NULL) {
         len=0;
         bEnd=p=(const BYTE*)(size_t)32;
@@ -666,32 +848,7 @@ XXH_FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed,
 
     h64 += (U64) len;
 
-
-        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
-        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
+    return XXH64_finalize(h64, p, len, endian, align);
 }
 
 
@@ -741,65 +898,71 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t
 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
 {
     XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)
+    memset(&state, 0, sizeof(state));
     state.v1 = seed + PRIME64_1 + PRIME64_2;
     state.v2 = seed + PRIME64_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME64_1;
-
+    /* do not write into reserved, planned to be removed in a future version */
+    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
     return XXH_OK;
 }
 
-
+FORCE_INLINE XXH_errorcode
+XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
 {
-
-
-
-#
-
+    if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+        return XXH_OK;
+#else
+        return XXH_ERROR;
 #endif
 
-
-
-    if (state->memsize + len < 32) {  /* fill in tmp buffer */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
-        state->memsize += (U32)len;
-        return XXH_OK;
-    }
+    {   const BYTE* p = (const BYTE*)input;
+        const BYTE* const bEnd = p + len;
 
-
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
-        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
-        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
-        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
-        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
-        p += 32-state->memsize;
-        state->memsize = 0;
-    }
+        state->total_len += len;
 
-
-
-
-
-
-        U64 v4 = state->v4;
+        if (state->memsize + len < 32) {  /* fill in tmp buffer */
+            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+            state->memsize += (U32)len;
+            return XXH_OK;
+        }
 
-
-
-
-
-
-
+        if (state->memsize) {   /* tmp buffer is full */
+            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+            state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+            p += 32-state->memsize;
+            state->memsize = 0;
+        }
 
-
-
-
-
-
+        if (p+32 <= bEnd) {
+            const BYTE* const limit = bEnd - 32;
+            U64 v1 = state->v1;
+            U64 v2 = state->v2;
+            U64 v3 = state->v3;
+            U64 v4 = state->v4;
+
+            do {
+                v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+                v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+                v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+                v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+            } while (p<=limit);
+
+            state->v1 = v1;
+            state->v2 = v2;
+            state->v3 = v3;
+            state->v4 = v4;
+        }
 
-
-
-
+        if (p < bEnd) {
+            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+            state->memsize = (unsigned)(bEnd-p);
+        }
     }
 
     return XXH_OK;
@@ -815,10 +978,8 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void*
     return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
 }
 
-
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
 {
-    const BYTE * p = (const BYTE*)state->mem64;
-    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
     U64 h64;
 
     if (state->total_len >= 32) {
@@ -833,37 +994,12 @@ XXH_FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endian
         h64 = XXH64_mergeRound(h64, v3);
         h64 = XXH64_mergeRound(h64, v4);
     } else {
-        h64 = state->v3 + PRIME64_5;
+        h64 = state->v3 /*seed*/ + PRIME64_5;
     }
 
     h64 += (U64) state->total_len;
 
-
-        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
-        h64 ^= k1;
-        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
-        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
+    return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
 }
 
 XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)