lz4-ruby 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,15 +1,15 @@
  ---
  !binary "U0hBMQ==":
  metadata.gz: !binary |-
- NzllZTQyNmZkZjdiNzQ4OTY4ZmU0ZDA4YTlkNzY1MjlmOTE1NjBhYQ==
+ ZDM4YzE2NGYyYzdiNDU4YjA3ODgwNjQxNzNmOTA3ZTA2YjYzZDM5OQ==
  data.tar.gz: !binary |-
- MTczZDc4OGM2NjhkNWY5MTRiZjZjOTBkOWQ0ZTIwOWNlNWZkZjkwNw==
+ NWE2YjViZTBlMmJhYTA0MzdhMmNkZDlkZTU3YTM2NDMyMzc4M2Q1Mw==
  SHA512:
  metadata.gz: !binary |-
- ZWJmN2Y3NmNmMmI4ODAzY2YzYzEyZWQ1NTFlNDc4YTkzNWU1NGY5ZmFjMmIy
- YThkM2M2NThmNjZiZGYyMTFhOGVhY2FkNzUzNjE1MjRmNjI1ODFiZjE2MDk2
- ZmE1NTllNGFkMmMyMzdhOTIyNDNkYTAyNDkyMTZlOWU5YzZkNDI=
+ ODQ4NDY1M2QyOTMyNmY3ODc3ODRkMjY4ZTZiZTVkZjRjMjliY2I0NjkzZGE4
+ ZWFlMzFhNDRlNjdhMTA0M2U4MTYwMTVjMzkwNDZjN2Y4ZThmMDQwZDY5ZWE4
+ Y2E2NTYyNjVkZTQxZGRkMWY0OTlhNDQ2ZWRkODUxMzM2MzZiNWQ=
  data.tar.gz: !binary |-
- Y2IyNWFiMjBiMDdjNGE3NWY4YTcyMGI4ZmU1YWM0MTM4MmUxZTY5MThkZTVj
- OTg4MjcyZGIyZTA0MmY3YWJjMjI3Mjc4MjE3YTA0Y2M3ZDZlZmZiOTg1M2Fj
- Mzg3MDk1NWI0YjY2MmY4NDhjOTE1NTgwOTdmMmY1YWYwZDAyNDE=
+ NWFlZTk2YTJkYWVmOGUzMzg4NTk3M2Q3ZTIwZDc0OTk0ZGQ4M2Q0OWQ3Mjc3
+ MWJkNTAyN2Q1YjRhMzcxYmJjMDZiZWUwMjNjMTRiYzMxNDM3MWI1MzJmNzRh
+ MGEwZmI2NDZmMGMwZjQ4YzQ5YjM0YTI0NDIyODFhNTc2ZTJkMTM=
@@ -0,0 +1,9 @@
+ = ChangeLog
+
+ == 0.3.0
+
+ * Support raw data stream handling methods (not yet working in JRuby).
+   * +LZ4.Raw.compress()+
+   * +LZ4.Raw.compressHC()+
+   * +LZ4.Raw.decompress()+
+ * Rename +LZ4.uncompress()+ to +LZ4.decompress()+.
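The renamed API round-trips in a few lines. Below is a minimal usage sketch, assuming the gem is loaded via require 'lz4-ruby' and exposes the LZ4.compress / LZ4.decompress module methods named in the ChangeLog above; the raw-stream methods are mentioned only in comments, since their exact signatures are not shown in this diff.

  require 'lz4-ruby'

  data = "hello lz4 " * 1000

  # Round trip through the default block format.
  # LZ4.uncompress was renamed to LZ4.decompress in 0.3.0.
  compressed = LZ4.compress(data)
  restored   = LZ4.decompress(compressed)
  raise "round-trip failed" unless restored == data

  # The new raw-stream methods (LZ4.Raw.compress(), LZ4.Raw.compressHC(),
  # LZ4.Raw.decompress()) operate on headerless LZ4 data, so the caller must
  # track buffer sizes itself; their signatures are not confirmed by this diff.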
@@ -33,7 +33,8 @@ Tested on VirtualBox VM : 2-core / 4GB RAM (Host : Core i5-2520M / 8GB RAM).
 
  == TODO
 
- * replace calloc() with xalloc() in lz4hc.c
+ * Support raw data handling methods for JRuby.
+ * Support streaming methods.
  * Write API documents.
 
  == Copyright
@@ -43,7 +44,8 @@ See LICENSE.txt for further details.
 
  == Contributors
 
- * Charles Oliver Nutter <headius at headius.com>
+ * {Charles Oliver Nutter}[https://github.com/headius] <headius at headius.com>
+ * dearblue[https://github.com/dearblue]
 
  == About LZ4
 
data/VERSION CHANGED
@@ -1 +1 @@
- 0.2.0
+ 0.3.0
@@ -1,861 +1,877 @@
- /*
- LZ4 - Fast LZ compression algorithm
- Copyright (C) 2011-2012, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- - LZ4 source repository : http://code.google.com/p/lz4/
- */
-
- //**************************************
- // Tuning parameters
- //**************************************
- // MEMORY_USAGE :
- // Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- // Increasing memory usage improves compression ratio
- // Reduced memory usage can improve speed, due to cache effect
- // Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- #define MEMORY_USAGE 14
-
- // NOTCOMPRESSIBLE_DETECTIONLEVEL :
- // Decreasing this value will make the algorithm skip faster data segments considered "incompressible"
- // This may decrease compression ratio dramatically, but will be faster on incompressible data
- // Increasing this value will make the algorithm search more before declaring a segment "incompressible"
- // This could improve compression a bit, but will be slower on incompressible data
- // The default value (6) is recommended
- #define NOTCOMPRESSIBLE_DETECTIONLEVEL 6
-
- // BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
- // This will provide a small boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
- // You can set this option to 1 in situations where data will remain within closed environment
- // This option is useless on Little_Endian CPU (such as x86)
- //#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
-
-
-
- //**************************************
- // CPU Feature Detection
- //**************************************
- // 32 or 64 bits ?
- #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode
- # define LZ4_ARCH64 1
- #else
- # define LZ4_ARCH64 0
- #endif
-
- // Little Endian or Big Endian ?
- // Note : overwrite the below #define if you know your architecture endianess
- #if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
- # define LZ4_BIG_ENDIAN 1
- #else
- // Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
- #endif
-
- // Unaligned memory access is automatically enabled for "common" CPU, such as x86.
- // For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
- // If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
- #if defined(__ARM_FEATURE_UNALIGNED)
- # define LZ4_FORCE_UNALIGNED_ACCESS 1
- #endif
-
- // Define this parameter if your target system or compiler does not support hardware bit count
- #if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
- # define LZ4_FORCE_SW_BITCOUNT
- #endif
-
-
- //**************************************
- // Compiler Options
- //**************************************
- #if __STDC_VERSION__ >= 199901L // C99
- /* "restrict" is a known keyword */
- #else
- # define restrict // Disable restrict
- #endif
-
- #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
- #ifdef _MSC_VER // Visual Studio
- # define inline __forceinline // Visual is not C99, but supports some kind of inline
- # include <intrin.h> // For Visual 2005
- # if LZ4_ARCH64 // 64-bit
- # pragma intrinsic(_BitScanForward64) // For Visual 2005
- # pragma intrinsic(_BitScanReverse64) // For Visual 2005
- # else
- # pragma intrinsic(_BitScanForward) // For Visual 2005
- # pragma intrinsic(_BitScanReverse) // For Visual 2005
- # endif
- #endif
-
- #ifdef _MSC_VER
- # define lz4_bswap16(x) _byteswap_ushort(x)
- #else
- # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
- #endif
-
- #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
- # define expect(expr,value) (__builtin_expect ((expr),(value)) )
- #else
- # define expect(expr,value) (expr)
- #endif
-
- #define likely(expr) expect((expr) != 0, 1)
- #define unlikely(expr) expect((expr) != 0, 0)
-
-
- //**************************************
- // Includes
- //**************************************
- #include <stdlib.h> // for malloc
- #include <string.h> // for memset
- #include "lz4.h"
-
-
- //**************************************
- // Basic Types
- //**************************************
- #if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively
- # define BYTE unsigned __int8
- # define U16 unsigned __int16
- # define U32 unsigned __int32
- # define S32 __int32
- # define U64 unsigned __int64
- #else
- # include <stdint.h>
- # define BYTE uint8_t
- # define U16 uint16_t
- # define U32 uint32_t
- # define S32 int32_t
- # define U64 uint64_t
- #endif
-
- #ifndef LZ4_FORCE_UNALIGNED_ACCESS
- # pragma pack(push, 1)
- #endif
-
- typedef struct _U16_S { U16 v; } U16_S;
- typedef struct _U32_S { U32 v; } U32_S;
- typedef struct _U64_S { U64 v; } U64_S;
-
- #ifndef LZ4_FORCE_UNALIGNED_ACCESS
- # pragma pack(pop)
- #endif
-
- #define A64(x) (((U64_S *)(x))->v)
- #define A32(x) (((U32_S *)(x))->v)
- #define A16(x) (((U16_S *)(x))->v)
-
-
- //**************************************
- // Constants
- //**************************************
- #define MINMATCH 4
-
- #define HASH_LOG (MEMORY_USAGE-2)
- #define HASHTABLESIZE (1 << HASH_LOG)
- #define HASH_MASK (HASHTABLESIZE - 1)
-
- #define SKIPSTRENGTH (NOTCOMPRESSIBLE_DETECTIONLEVEL>2?NOTCOMPRESSIBLE_DETECTIONLEVEL:2)
- #define STACKLIMIT 13
- #define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
- #define COPYLENGTH 8
- #define LASTLITERALS 5
- #define MFLIMIT (COPYLENGTH+MINMATCH)
- #define MINLENGTH (MFLIMIT+1)
-
- #define MAXD_LOG 16
- #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
-
- #define ML_BITS 4
- #define ML_MASK ((1U<<ML_BITS)-1)
- #define RUN_BITS (8-ML_BITS)
- #define RUN_MASK ((1U<<RUN_BITS)-1)
-
-
- //**************************************
- // Architecture-specific macros
- //**************************************
- #if LZ4_ARCH64 // 64-bit
- # define STEPSIZE 8
- # define UARCH U64
- # define AARCH A64
- # define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
- # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
- # define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
- # define HTYPE U32
- # define INITBASE(base) const BYTE* const base = ip
- #else // 32-bit
- # define STEPSIZE 4
- # define UARCH U32
- # define AARCH A32
- # define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
- # define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
- # define LZ4_SECURECOPY LZ4_WILDCOPY
- # define HTYPE const BYTE*
- # define INITBASE(base) const int base = 0
- #endif
-
- #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
- # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
- # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
- #else // Little Endian
- # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
- # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
- #endif
-
-
- //**************************************
- // Local structures
- //**************************************
- struct refTables
- {
- HTYPE hashTable[HASHTABLESIZE];
- };
-
-
- //**************************************
- // Macros
- //**************************************
- #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
- #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
- #define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
- #define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+l; LZ4_WILDCOPY(s,d,e); d=e; }
-
-
- //****************************
- // Private functions
- //****************************
- #if LZ4_ARCH64
-
- static inline int LZ4_NbCommonBytes (register U64 val)
- {
- #if defined(LZ4_BIG_ENDIAN)
- #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse64( &r, val );
- return (int)(r>>3);
- #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clzll(val) >> 3);
- #else
- int r;
- if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
- #endif
- #else
- #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanForward64( &r, val );
- return (int)(r>>3);
- #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctzll(val) >> 3);
- #else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
- #endif
- #endif
- }
-
- #else
-
- static inline int LZ4_NbCommonBytes (register U32 val)
- {
- #if defined(LZ4_BIG_ENDIAN)
- #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse( &r, val );
- return (int)(r>>3);
- #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clz(val) >> 3);
- #else
- int r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
- #endif
- #else
- #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanForward( &r, val );
- return (int)(r>>3);
- #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctz(val) >> 3);
- #else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
- #endif
- #endif
- }
-
- #endif
-
-
-
- //******************************
- // Compression functions
- //******************************
-
- // LZ4_compressCtx :
- // -----------------
- // Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
- // If it cannot achieve it, compression will stop, and result of the function will be zero.
- // return : the number of bytes written in buffer 'dest', or 0 if the compression fails
-
- static inline int LZ4_compressCtx(void** ctx,
- const char* source,
- char* dest,
- int isize,
- int maxOutputSize)
- {
- #if HEAPMODE
- struct refTables *srt = (struct refTables *) (*ctx);
- HTYPE* HashTable;
- #else
- HTYPE HashTable[HASHTABLESIZE] = {0};
- #endif
-
- const BYTE* ip = (BYTE*) source;
- INITBASE(base);
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + isize;
- const BYTE* const mflimit = iend - MFLIMIT;
- #define matchlimit (iend - LASTLITERALS)
-
- BYTE* op = (BYTE*) dest;
- BYTE* const oend = op + maxOutputSize;
-
- int len, length;
- const int skipStrength = SKIPSTRENGTH;
- U32 forwardH;
-
-
- // Init
- if (isize<MINLENGTH) goto _last_literals;
- #if HEAPMODE
- if (*ctx == NULL)
- {
- srt = (struct refTables *) malloc ( sizeof(struct refTables) );
- *ctx = (void*) srt;
- }
- HashTable = (HTYPE*)(srt->hashTable);
- memset((void*)HashTable, 0, sizeof(srt->hashTable));
- #else
- (void) ctx;
- #endif
-
-
- // First Byte
- HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
- ip++; forwardH = LZ4_HASH_VALUE(ip);
-
- // Main Loop
- for ( ; ; )
- {
- int findMatchAttempts = (1U << skipStrength) + 3;
- const BYTE* forwardIp = ip;
- const BYTE* ref;
- BYTE* token;
-
- // Find a match
- do {
- U32 h = forwardH;
- int step = findMatchAttempts++ >> skipStrength;
- ip = forwardIp;
- forwardIp = ip + step;
-
- if unlikely(forwardIp > mflimit) { goto _last_literals; }
-
- forwardH = LZ4_HASH_VALUE(forwardIp);
- ref = base + HashTable[h];
- HashTable[h] = ip - base;
-
- } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
-
- // Catch up
- while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
-
- // Encode Literal length
- length = (int)(ip - anchor);
- token = op++;
- if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) >= oend) return 0; // Check output limit
- #ifdef _MSC_VER
- if (length>=(int)RUN_MASK)
- {
- int len = length-RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- if (len>254)
- {
- do { *op++ = 255; len -= 255; } while (len>254);
- *op++ = (BYTE)len;
- memcpy(op, anchor, length);
- op += length;
- goto _next_match;
- }
- else
- *op++ = (BYTE)len;
- }
- else *token = (length<<ML_BITS);
- #else
- if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
- else *token = (length<<ML_BITS);
- #endif
-
- // Copy Literals
- LZ4_BLINDCOPY(anchor, op, length);
-
- _next_match:
- // Encode Offset
- LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
-
- // Start Counting
- ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
- anchor = ip;
- while likely(ip<matchlimit-(STEPSIZE-1))
- {
- UARCH diff = AARCH(ref) ^ AARCH(ip);
- if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
- ip += LZ4_NbCommonBytes(diff);
- goto _endCount;
- }
- if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
- if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
- if ((ip<matchlimit) && (*ref == *ip)) ip++;
- _endCount:
-
- // Encode MatchLength
- len = (int)(ip - anchor);
- if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
- else *token += len;
-
- // Test end of chunk
- if (ip > mflimit) { anchor = ip; break; }
-
- // Fill table
- HashTable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
-
- // Test next position
- ref = base + HashTable[LZ4_HASH_VALUE(ip)];
- HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
- if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
-
- // Prepare next loop
- anchor = ip++;
- forwardH = LZ4_HASH_VALUE(ip);
- }
-
- _last_literals:
- // Encode Last Literals
- {
- int lastRun = (int)(iend - anchor);
- if (((char*)op - dest) + lastRun + 1 + ((lastRun-15)/255) >= maxOutputSize) return 0;
- if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
- else *op++ = (lastRun<<ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend-anchor;
- }
-
- // End
- return (int) (((char*)op)-dest);
- }
-
-
-
- // Note : this function is valid only if isize < LZ4_64KLIMIT
- #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
- #define HASHLOG64K (HASH_LOG+1)
- #define HASH64KTABLESIZE (1U<<HASHLOG64K)
- #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
- #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
- static inline int LZ4_compress64kCtx(void** ctx,
- const char* source,
- char* dest,
- int isize,
- int maxOutputSize)
- {
- #if HEAPMODE
- struct refTables *srt = (struct refTables *) (*ctx);
- U16* HashTable;
- #else
- U16 HashTable[HASH64KTABLESIZE] = {0};
- #endif
-
- const BYTE* ip = (BYTE*) source;
- const BYTE* anchor = ip;
- const BYTE* const base = ip;
- const BYTE* const iend = ip + isize;
- const BYTE* const mflimit = iend - MFLIMIT;
- #define matchlimit (iend - LASTLITERALS)
-
- BYTE* op = (BYTE*) dest;
- BYTE* const oend = op + maxOutputSize;
-
- int len, length;
- const int skipStrength = SKIPSTRENGTH;
- U32 forwardH;
-
-
- // Init
- if (isize<MINLENGTH) goto _last_literals;
- #if HEAPMODE
- if (*ctx == NULL)
- {
- srt = (struct refTables *) malloc ( sizeof(struct refTables) );
- *ctx = (void*) srt;
- }
- HashTable = (U16*)(srt->hashTable);
- memset((void*)HashTable, 0, sizeof(srt->hashTable));
- #else
- (void) ctx;
- #endif
-
-
- // First Byte
- ip++; forwardH = LZ4_HASH64K_VALUE(ip);
-
- // Main Loop
- for ( ; ; )
- {
- int findMatchAttempts = (1U << skipStrength) + 3;
- const BYTE* forwardIp = ip;
- const BYTE* ref;
- BYTE* token;
-
- // Find a match
- do {
- U32 h = forwardH;
- int step = findMatchAttempts++ >> skipStrength;
- ip = forwardIp;
- forwardIp = ip + step;
-
- if (forwardIp > mflimit) { goto _last_literals; }
-
- forwardH = LZ4_HASH64K_VALUE(forwardIp);
- ref = base + HashTable[h];
- HashTable[h] = (U16)(ip - base);
-
- } while (A32(ref) != A32(ip));
-
- // Catch up
- while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
-
- // Encode Literal length
- length = (int)(ip - anchor);
- token = op++;
- if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) >= oend) return 0; // Check output limit
- #ifdef _MSC_VER
- if (length>=(int)RUN_MASK)
- {
- int len = length-RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- if (len>254)
- {
- do { *op++ = 255; len -= 255; } while (len>254);
- *op++ = (BYTE)len;
- memcpy(op, anchor, length);
- op += length;
- goto _next_match;
- }
- else
- *op++ = (BYTE)len;
- }
- else *token = (length<<ML_BITS);
- #else
- if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
- else *token = (length<<ML_BITS);
- #endif
-
- // Copy Literals
- LZ4_BLINDCOPY(anchor, op, length);
-
- _next_match:
- // Encode Offset
- LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
-
- // Start Counting
- ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
- anchor = ip;
- while (ip<matchlimit-(STEPSIZE-1))
- {
- UARCH diff = AARCH(ref) ^ AARCH(ip);
- if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
- ip += LZ4_NbCommonBytes(diff);
- goto _endCount;
- }
- if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
- if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
- if ((ip<matchlimit) && (*ref == *ip)) ip++;
- _endCount:
-
- // Encode MatchLength
- len = (int)(ip - anchor);
- if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
- else *token += len;
-
- // Test end of chunk
- if (ip > mflimit) { anchor = ip; break; }
-
- // Fill table
- HashTable[LZ4_HASH64K_VALUE(ip-2)] = (U16)(ip - 2 - base);
-
- // Test next position
- ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
- HashTable[LZ4_HASH64K_VALUE(ip)] = (U16)(ip - base);
- if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
-
- // Prepare next loop
- anchor = ip++;
- forwardH = LZ4_HASH64K_VALUE(ip);
- }
-
- _last_literals:
- // Encode Last Literals
- {
- int lastRun = (int)(iend - anchor);
- if (((char*)op - dest) + lastRun + 1 + ((lastRun)>>8) >= maxOutputSize) return 0;
- if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
- else *op++ = (lastRun<<ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend-anchor;
- }
-
- // End
- return (int) (((char*)op)-dest);
- }
-
-
- int LZ4_compress_limitedOutput(const char* source,
- char* dest,
- int isize,
- int maxOutputSize)
- {
- #if HEAPMODE
- void* ctx = malloc(sizeof(struct refTables));
- int result;
- if (isize < LZ4_64KLIMIT)
- result = LZ4_compress64kCtx(&ctx, source, dest, isize, maxOutputSize);
- else result = LZ4_compressCtx(&ctx, source, dest, isize, maxOutputSize);
- free(ctx);
- return result;
- #else
- if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize, maxOutputSize);
- return LZ4_compressCtx(NULL, source, dest, isize, maxOutputSize);
- #endif
- }
-
-
- int LZ4_compress(const char* source,
- char* dest,
- int isize)
- {
- return LZ4_compress_limitedOutput(source, dest, isize, LZ4_compressBound(isize));
- }
-
-
-
-
- //****************************
- // Decompression functions
- //****************************
-
- // Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()
- // are safe against "buffer overflow" attack type.
- // They will never write nor read outside of the provided output buffers.
- // LZ4_uncompress_unknownOutputSize() also insures that it will never read outside of the input buffer.
- // A corrupted input will produce an error result, a negative int, indicating the position of the error within input stream.
-
- int LZ4_uncompress(const char* source,
- char* dest,
- int osize)
- {
- // Local Variables
- const BYTE* restrict ip = (const BYTE*) source;
- const BYTE* restrict ref;
-
- BYTE* restrict op = (BYTE*) dest;
- BYTE* const oend = op + osize;
- BYTE* cpy;
-
- BYTE token;
-
- int len, length;
- size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
-
-
- // Main Loop
- while (1)
- {
- // get runlength
- token = *ip++;
- if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
- // copy literals
- cpy = op+length;
- if unlikely(cpy>oend-COPYLENGTH)
- {
- if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
- memcpy(op, ip, length);
- ip += length;
- break; // Necessarily EOF
- }
- LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-
- // get offset
- LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
- if (ref < (BYTE* const)dest) goto _output_error; // Error : offset create reference outside destination buffer
-
- // get matchlength
- if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }
-
- // copy repeated sequence
- if unlikely(op-ref<STEPSIZE)
- {
- #if LZ4_ARCH64
- size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
- size_t dec2 = dec2table[op-ref];
- #else
- const int dec2 = 0;
- #endif
- *op++ = *ref++;
- *op++ = *ref++;
- *op++ = *ref++;
- *op++ = *ref++;
- ref -= dec[op-ref];
- A32(op)=A32(ref); op += STEPSIZE-4;
- ref -= dec2;
- } else { LZ4_COPYSTEP(ref,op); }
- cpy = op + length - (STEPSIZE-4);
- if (cpy>oend-COPYLENGTH)
- {
- if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
- LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
- while(op<cpy) *op++=*ref++;
- op=cpy;
- if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
- continue;
- }
- LZ4_SECURECOPY(ref, op, cpy);
- op=cpy; // correction
- }
-
- // end of decoding
- return (int) (((char*)ip)-source);
-
- // write overflow error detected
- _output_error:
- return (int) (-(((char*)ip)-source));
- }
-
-
- int LZ4_uncompress_unknownOutputSize(
- const char* source,
- char* dest,
- int isize,
- int maxOutputSize)
- {
- // Local Variables
- const BYTE* restrict ip = (const BYTE*) source;
- const BYTE* const iend = ip + isize;
- const BYTE* restrict ref;
-
- BYTE* restrict op = (BYTE*) dest;
- BYTE* const oend = op + maxOutputSize;
- BYTE* cpy;
-
- size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
-
-
- // Main Loop
- while (ip<iend)
- {
- BYTE token;
- int length;
-
- // get runlength
- token = *ip++;
- if ((length=(token>>ML_BITS)) == RUN_MASK) { int s=255; while ((ip<iend) && (s==255)) { s=*ip++; length += s; } }
-
- // copy literals
- cpy = op+length;
- if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))
- {
- if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
- if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer
- memcpy(op, ip, length);
- op += length;
- ip += length;
- if (ip<iend) goto _output_error; // Error : LZ4 format violation
- break; // Necessarily EOF, due to parsing restrictions
- }
- LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-
- // get offset
- LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
- if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates reference outside of destination buffer
-
- // get matchlength
- if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }
-
- // copy repeated sequence
- if unlikely(op-ref<STEPSIZE)
- {
- #if LZ4_ARCH64
- size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
- size_t dec2 = dec2table[op-ref];
- #else
- const int dec2 = 0;
- #endif
- *op++ = *ref++;
- *op++ = *ref++;
- *op++ = *ref++;
- *op++ = *ref++;
- ref -= dec[op-ref];
- A32(op)=A32(ref); op += STEPSIZE-4;
- ref -= dec2;
- } else { LZ4_COPYSTEP(ref,op); }
- cpy = op + length - (STEPSIZE-4);
- if (cpy>oend-COPYLENGTH)
- {
- if (cpy > oend) goto _output_error; // Error : request to write outside of destination buffer
- LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
- while(op<cpy) *op++=*ref++;
- op=cpy;
- if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
- continue;
- }
- LZ4_SECURECOPY(ref, op, cpy);
- op=cpy; // correction
- }
-
- // end of decoding
- return (int) (((char*)op)-dest);
-
- // write overflow error detected
- _output_error:
- return (int) (-(((char*)ip)-source));
- }
-
+ /*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2014, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : http://code.google.com/p/lz4/
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+ */
+
+ /**************************************
+ Tuning parameters
+ **************************************/
+ /*
+ * MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+ #define MEMORY_USAGE 14
+
+ /*
+ * HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
+ */
+ #define HEAPMODE 0
+
+
+ /**************************************
+ CPU Feature Detection
+ **************************************/
+ /* 32 or 64 bits ? */
+ #if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+ || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
+ || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
+ || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) /* Detects 64 bits mode */
+ # define LZ4_ARCH64 1
+ #else
+ # define LZ4_ARCH64 0
+ #endif
+
+ /*
+ * Little Endian or Big Endian ?
+ * Overwrite the #define below if you know your architecture endianess
+ */
+ #if defined (__GLIBC__)
+ # include <endian.h>
+ # if (__BYTE_ORDER == __BIG_ENDIAN)
+ # define LZ4_BIG_ENDIAN 1
+ # endif
+ #elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+ # define LZ4_BIG_ENDIAN 1
+ #elif defined(__sparc) || defined(__sparc__) \
+ || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+ || defined(__hpux) || defined(__hppa) \
+ || defined(_MIPSEB) || defined(__s390__)
+ # define LZ4_BIG_ENDIAN 1
+ #else
+ /* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */
+ #endif
+
+ /*
+ * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
+ * For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
+ * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
+ */
+ #if defined(__ARM_FEATURE_UNALIGNED)
+ # define LZ4_FORCE_UNALIGNED_ACCESS 1
+ #endif
+
+ /* Define this parameter if your target system or compiler does not support hardware bit count */
+ #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+ # define LZ4_FORCE_SW_BITCOUNT
+ #endif
+
+ /*
+ * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+ * This option may provide a small boost to performance for some big endian cpu, although probably modest.
+ * You may set this option to 1 if data will remain within closed environment.
+ * This option is useless on Little_Endian CPU (such as x86)
+ */
+
+ /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
+
+
+ /**************************************
+ Compiler Options
+ **************************************/
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+ /* "restrict" is a known keyword */
+ #else
+ # define restrict /* Disable restrict */
+ #endif
+
+ #ifdef _MSC_VER /* Visual Studio */
+ # define FORCE_INLINE static __forceinline
+ # include <intrin.h> /* For Visual 2005 */
+ # if LZ4_ARCH64 /* 64-bits */
+ # pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
+ # pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
+ # else /* 32-bits */
+ # pragma intrinsic(_BitScanForward) /* For Visual 2005 */
+ # pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
+ # endif
+ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+ #else
+ # ifdef __GNUC__
+ # define FORCE_INLINE static inline __attribute__((always_inline))
+ # else
+ # define FORCE_INLINE static inline
+ # endif
+ #endif
+
+ #ifdef _MSC_VER /* Visual Studio */
+ # define lz4_bswap16(x) _byteswap_ushort(x)
+ #else
+ # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+ #endif
+
+ #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+ #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+ # define expect(expr,value) (__builtin_expect ((expr),(value)) )
+ #else
+ # define expect(expr,value) (expr)
+ #endif
+
+ #define likely(expr) expect((expr) != 0, 1)
+ #define unlikely(expr) expect((expr) != 0, 0)
+
+
+ /**************************************
+ Memory routines
+ **************************************/
+ #include <stdlib.h> /* malloc, calloc, free */
+ #define ALLOCATOR(n,s) calloc(n,s)
+ #define FREEMEM free
+ #include <string.h> /* memset, memcpy */
+ #define MEM_INIT memset
+
+
+ /**************************************
+ Includes
+ **************************************/
+ #include "lz4.h"
+
+
+ /**************************************
+ Basic Types
+ **************************************/
+ #if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+ # include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ #else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ #endif
+
+ #if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+ # define _PACKED __attribute__ ((packed))
+ #else
+ # define _PACKED
+ #endif
+
+ #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+ # if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ # pragma pack(1)
+ # else
+ # pragma pack(push, 1)
+ # endif
+ #endif
+
+ typedef struct { U16 v; } _PACKED U16_S;
+ typedef struct { U32 v; } _PACKED U32_S;
+ typedef struct { U64 v; } _PACKED U64_S;
+ typedef struct {size_t v;} _PACKED size_t_S;
+
+ #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+ # if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ # pragma pack(0)
+ # else
+ # pragma pack(pop)
+ # endif
+ #endif
+
+ #define A16(x) (((U16_S *)(x))->v)
+ #define A32(x) (((U32_S *)(x))->v)
+ #define A64(x) (((U64_S *)(x))->v)
+ #define AARCH(x) (((size_t_S *)(x))->v)
+
+
+ /**************************************
+ Constants
+ **************************************/
+ #define LZ4_HASHLOG (MEMORY_USAGE-2)
+ #define HASHTABLESIZE (1 << MEMORY_USAGE)
+ #define HASHNBCELLS4 (1 << LZ4_HASHLOG)
+
+ #define MINMATCH 4
+
+ #define COPYLENGTH 8
+ #define LASTLITERALS 5
+ #define MFLIMIT (COPYLENGTH+MINMATCH)
+ static const int LZ4_minLength = (MFLIMIT+1);
+
+ #define KB *(1U<<10)
+ #define MB *(1U<<20)
+ #define GB *(1U<<30)
+
+ #define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
+ #define SKIPSTRENGTH 6 /* Increasing this value will make the compression run slower on incompressible data */
+
+ #define MAXD_LOG 16
+ #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+ #define ML_BITS 4
+ #define ML_MASK ((1U<<ML_BITS)-1)
+ #define RUN_BITS (8-ML_BITS)
+ #define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+ /**************************************
+ Structures and local types
+ **************************************/
+ typedef struct {
+ U32 hashTable[HASHNBCELLS4];
+ const BYTE* bufferStart;
+ const BYTE* base;
+ const BYTE* nextBlock;
+ } LZ4_Data_Structure;
+
+ typedef enum { notLimited = 0, limited = 1 } limitedOutput_directive;
+ typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+ typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
+
+ typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+ /**************************************
+ Architecture-specific macros
+ **************************************/
+ #define STEPSIZE sizeof(size_t)
+ #define LZ4_COPYSTEP(d,s) { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
+ #define LZ4_COPY8(d,s) { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
+
+ #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+ # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+ # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+ #else /* Little Endian */
+ # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+ # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
+ #endif
+
+
+ /**************************************
+ Macros
+ **************************************/
+ #if LZ4_ARCH64 || !defined(__GNUC__)
+ # define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
+ #else
+ # define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
+ #endif
+ #define LZ4_SECURECOPY(d,s,e) { if (d<e) LZ4_WILDCOPY(d,s,e); }
+
+
+ /****************************
+ Private local functions
+ ****************************/
+ #if LZ4_ARCH64
+
+ FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
+ {
+ # if defined(LZ4_BIG_ENDIAN)
+ # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (int)(r>>3);
+ # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clzll(val) >> 3);
+ # else
+ int r;
+ if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+ # endif
+ # else
+ # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64( &r, val );
+ return (int)(r>>3);
+ # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctzll(val) >> 3);
+ # else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+ # endif
+ # endif
+ }
+
+ #else
+
+ FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
+ {
+ # if defined(LZ4_BIG_ENDIAN)
+ # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse( &r, val );
+ return (int)(r>>3);
+ # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clz(val) >> 3);
+ # else
+ int r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+ # endif
+ # else
+ # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward( &r, val );
+ return (int)(r>>3);
+ # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctz(val) >> 3);
+ # else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+ # endif
+ # endif
+ }
+
+ #endif
+
+
+ /****************************
+ Compression functions
+ ****************************/
+ FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
+ {
+ if (tableType == byU16)
+ return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+ }
+
+ FORCE_INLINE int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
+
+ FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+ switch (tableType)
+ {
+ case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
+ }
+ }
+
+ FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+ U32 h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+ }
+
+ FORCE_INLINE const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+ if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
+ { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+ }
+
+ FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+ U32 h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+ }
+
+
+ FORCE_INLINE int LZ4_compress_generic(
+ void* ctx,
+ const char* source,
+ char* dest,
+ int inputSize,
+ int maxOutputSize,
+
+ limitedOutput_directive limitedOutput,
+ tableType_t tableType,
+ prefix64k_directive prefix)
+ {
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* const base = (prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->base : (const BYTE*) source;
+ const BYTE* const lowLimit = ((prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->bufferStart : (const BYTE*)source);
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const oend = op + maxOutputSize;
+
+ int length;
+ const int skipStrength = SKIPSTRENGTH;
+ U32 forwardH;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+ if ((prefix==withPrefix) && (ip != ((LZ4_Data_Structure*)ctx)->nextBlock)) return 0; /* must continue from end of previous block */
+ if (prefix==withPrefix) ((LZ4_Data_Structure*)ctx)->nextBlock=iend; /* do it now, due to potential early exit */
+ if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ LZ4_putPosition(ip, ctx, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; )
+ {
+ int findMatchAttempts = (1U << skipStrength) + 3;
+ const BYTE* forwardIp = ip;
+ const BYTE* ref;
+ BYTE* token;
+
+ /* Find a match */
+ do {
+ U32 h = forwardH;
+ int step = findMatchAttempts++ >> skipStrength;
+ ip = forwardIp;
+ forwardIp = ip + step;
+
+ if (unlikely(forwardIp > mflimit)) { goto _last_literals; }
+
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
+ LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
+
+ } while ((ref + MAX_DISTANCE < ip) || (A32(ref) != A32(ip)));
+
+ /* Catch up */
+ while ((ip>anchor) && (ref > lowLimit) && (unlikely(ip[-1]==ref[-1]))) { ip--; ref--; }
+
+ /* Encode Literal length */
+ length = (int)(ip - anchor);
+ token = op++;
+ if ((limitedOutput) && (unlikely(op + length + (2 + 1 + LASTLITERALS) + (length/255) > oend))) return 0; /* Check output limit */
+ if (length>=(int)RUN_MASK)
+ {
+ int len = length-RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(length<<ML_BITS);
+
+ /* Copy Literals */
+ { BYTE* end=(op)+(length); LZ4_WILDCOPY(op,anchor,end); op=end; }
+
+ _next_match:
+ /* Encode Offset */
+ LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
+
+ /* Start Counting */
+ ip+=MINMATCH; ref+=MINMATCH; /* MinMatch already verified */
+ anchor = ip;
+ while (likely(ip<matchlimit-(STEPSIZE-1)))
+ {
+ size_t diff = AARCH(ref) ^ AARCH(ip);
+ if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
+ ip += LZ4_NbCommonBytes(diff);
+ goto _endCount;
+ }
+ if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
+ if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
+ if ((ip<matchlimit) && (*ref == *ip)) ip++;
+ _endCount:
+
+ /* Encode MatchLength */
+ length = (int)(ip - anchor);
+ if ((limitedOutput) && (unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend))) return 0; /* Check output limit */
+ if (length>=(int)ML_MASK)
+ {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length-=255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ }
+ else *token += (BYTE)(length);
+
+ /* Test end of chunk */
+ if (ip > mflimit) { anchor = ip; break; }
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, ctx, tableType, base);
+
+ /* Test next position */
+ ref = LZ4_getPosition(ip, ctx, tableType, base);
+ LZ4_putPosition(ip, ctx, tableType, base);
+ if ((ref + MAX_DISTANCE >= ip) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
+
+ /* Prepare next loop */
+ anchor = ip++;
+ forwardH = LZ4_hashPosition(ip, tableType);
+ }
+
+ _last_literals:
+ /* Encode Last Literals */
+ {
+ int lastRun = (int)(iend - anchor);
+ if ((limitedOutput) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */
+ if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
+ else *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend-anchor;
+ }
+
+ /* End */
+ return (int) (((char*)op)-dest);
+ }
+
+
+ int LZ4_compress(const char* source, char* dest, int inputSize)
556
+ {
557
+ #if (HEAPMODE)
558
+ void* ctx = ALLOCATOR(HASHNBCELLS4, 4); /* Aligned on 4-bytes boundaries */
559
+ #else
560
+ U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
561
+ #endif
562
+ int result;
563
+
564
+ if (inputSize < (int)LZ4_64KLIMIT)
565
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noPrefix);
566
+ else
567
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
568
+
569
+ #if (HEAPMODE)
570
+ FREEMEM(ctx);
571
+ #endif
572
+ return result;
573
+ }
574
+
575
+ int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
576
+ {
577
+ #if (HEAPMODE)
578
+ void* ctx = ALLOCATOR(HASHNBCELLS4, 4); /* Aligned on 4-bytes boundaries */
579
+ #else
580
+ U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
581
+ #endif
582
+ int result;
583
+
584
+ if (inputSize < (int)LZ4_64KLIMIT)
585
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, byU16, noPrefix);
586
+ else
587
+ result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
588
+
589
+ #if (HEAPMODE)
590
+ FREEMEM(ctx);
591
+ #endif
592
+ return result;
593
+ }
594
+
595
+
596
+ /*****************************
597
+ Using external allocation
598
+ *****************************/
599
+
600
+ int LZ4_sizeofState() { return 1 << MEMORY_USAGE; }
601
+
602
+
603
+ int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
604
+ {
605
+ if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
606
+ MEM_INIT(state, 0, LZ4_sizeofState());
607
+
608
+ if (inputSize < (int)LZ4_64KLIMIT)
609
+ return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noPrefix);
610
+ else
611
+ return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
612
+ }
613
+
614
+
615
+ int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
616
+ {
617
+ if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
618
+ MEM_INIT(state, 0, LZ4_sizeofState());
619
+
620
+ if (inputSize < (int)LZ4_64KLIMIT)
621
+ return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limited, byU16, noPrefix);
622
+ else
623
+ return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
624
+ }
625
+
+
+ /****************************
+    Stream functions
+ ****************************/
+
+ int LZ4_sizeofStreamState()
+ {
+     return sizeof(LZ4_Data_Structure);
+ }
+
+ FORCE_INLINE void LZ4_init(LZ4_Data_Structure* lz4ds, const BYTE* base)
+ {
+     MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
+     lz4ds->bufferStart = base;
+     lz4ds->base = base;
+     lz4ds->nextBlock = base;
+ }
+
+ int LZ4_resetStreamState(void* state, const char* inputBuffer)
+ {
+     if ((((size_t)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on a 4-byte boundary */
+     LZ4_init((LZ4_Data_Structure*)state, (const BYTE*)inputBuffer);
+     return 0;
+ }
+
+ void* LZ4_create (const char* inputBuffer)
+ {
+     void* lz4ds = ALLOCATOR(1, sizeof(LZ4_Data_Structure));
+     LZ4_init ((LZ4_Data_Structure*)lz4ds, (const BYTE*)inputBuffer);
+     return lz4ds;
+ }
+
+
+ int LZ4_free (void* LZ4_Data)
+ {
+     FREEMEM(LZ4_Data);
+     return (0);
+ }
+
+
+ char* LZ4_slideInputBuffer (void* LZ4_Data)
+ {
+     LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
+     size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);
+
+     if ( (lz4ds->base - delta > lz4ds->base)                         /* underflow control */
+        || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) )  /* close to 32-bit limit */
+     {
+         size_t deltaLimit = (lz4ds->nextBlock - 64 KB) - lz4ds->base;
+         int nH;
+
+         for (nH=0; nH < HASHNBCELLS4; nH++)
+         {
+             if ((size_t)(lz4ds->hashTable[nH]) < deltaLimit) lz4ds->hashTable[nH] = 0;
+             else lz4ds->hashTable[nH] -= (U32)deltaLimit;
+         }
+         memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
+         lz4ds->base = lz4ds->bufferStart;
+         lz4ds->nextBlock = lz4ds->base + 64 KB;
+     }
+     else
+     {
+         memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
+         lz4ds->nextBlock -= delta;
+         lz4ds->base -= delta;
+     }
+
+     return (char*)(lz4ds->nextBlock);
+ }
+
+
+ int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize)
+ {
+     return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, 0, notLimited, byU32, withPrefix);
+ }
+
+
+ int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
+ {
+     return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, maxOutputSize, limited, byU32, withPrefix);
+ }
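A hedged sketch of the intended streaming loop (buffer sizes, I/O handling, and names are illustrative, not part of lz4.c): consecutive blocks are compressed from one input buffer so each block can reference the previous 64 KB of history, and LZ4_slideInputBuffer() recycles the buffer when it fills.

  #include <stdio.h>
  #include "lz4.h"

  #define BLOCK_SIZE        (8 * 1024)
  #define INPUT_BUFFER_SIZE (192 * 1024)   /* must exceed 64 KB history + one block */

  static char inBuf[INPUT_BUFFER_SIZE];
  static char outBuf[BLOCK_SIZE + BLOCK_SIZE/255 + 16];   /* worst-case block bound */

  void stream_compress(FILE* in, FILE* out)
  {
      void* lz4ds = LZ4_create(inBuf);   /* history window starts at inBuf */
      char* inPtr = inBuf;
      int readSize;

      while ((readSize = (int)fread(inPtr, 1, BLOCK_SIZE, in)) > 0)
      {
          /* Each block may reference the previous 64 KB of inBuf. A real
             container would also record csize so a decoder can frame blocks. */
          int csize = LZ4_compress_continue(lz4ds, inPtr, outBuf, readSize);
          fwrite(outBuf, 1, (size_t)csize, out);
          inPtr += readSize;
          /* No room for the next block: copy the last 64 KB to the front
             so earlier matches stay addressable, and restart after it. */
          if (inPtr + BLOCK_SIZE > inBuf + INPUT_BUFFER_SIZE)
              inPtr = LZ4_slideInputBuffer(lz4ds);
      }
      LZ4_free(lz4ds);
  }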
+
+
+ /****************************
+    Decompression functions
+ ****************************/
+
+ /*
+  * This generic decompression function covers all use cases.
+  * It shall be instantiated several times, using different sets of directives.
+  * Note that it is essential this generic function is really inlined,
+  * in order to remove useless branches during compilation optimisation.
+  */
+ FORCE_INLINE int LZ4_decompress_generic(
+     const char* source,
+     char* dest,
+     int inputSize,
+     int outputSize,          /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
+
+     int endOnInput,          /* endOnOutputSize, endOnInputSize */
+     int prefix64k,           /* noPrefix, withPrefix */
+     int partialDecoding,     /* full, partial */
+     int targetOutputSize     /* only used if partialDecoding==partial */
+ )
+ {
+     /* Local Variables */
+     const BYTE* restrict ip = (const BYTE*) source;
+     const BYTE* ref;
+     const BYTE* const iend = ip + inputSize;
+
+     BYTE* op = (BYTE*) dest;
+     BYTE* const oend = op + outputSize;
+     BYTE* cpy;
+     BYTE* oexit = op + targetOutputSize;
+
+     /*const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; / static reduces speed for LZ4_decompress_safe() on GCC64 */
+     const size_t dec32table[] = {4-0, 4-3, 4-2, 4-3, 4-0, 4-0, 4-0, 4-0};   /* static reduces speed for LZ4_decompress_safe() on GCC64 */
+     static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+
+
+     /* Special cases */
+     if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;   /* targetOutputSize too high => decode everything */
+     if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
+     if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
+
+
+     /* Main Loop */
+     while (1)
+     {
+         unsigned token;
+         size_t length;
+
+         /* get runlength */
+         token = *ip++;
+         if ((length=(token>>ML_BITS)) == RUN_MASK)
+         {
+             unsigned s=255;
+             while (((endOnInput)?ip<iend:1) && (s==255))
+             {
+                 s = *ip++;
+                 length += s;
+             }
+         }
+
+         /* copy literals */
+         cpy = op+length;
+         if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+             || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+         {
+             if (partialDecoding)
+             {
+                 if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
+                 if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
+             }
+             else
+             {
+                 if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
+                 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
+             }
+             memcpy(op, ip, length);
+             ip += length;
+             op += length;
+             break;   /* Necessarily EOF, due to parsing restrictions */
+         }
+         LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;
+
+         /* get offset */
+         LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
+         if ((prefix64k==noPrefix) && (unlikely(ref < (BYTE* const)dest))) goto _output_error;   /* Error : offset outside destination buffer */
+
+         /* get matchlength */
+         if ((length=(token&ML_MASK)) == ML_MASK)
+         {
+             while ((!endOnInput) || (ip<iend-(LASTLITERALS+1)))   /* Ensure enough bytes remain for LASTLITERALS + token */
+             {
+                 unsigned s = *ip++;
+                 length += s;
+                 if (s==255) continue;
+                 break;
+             }
+         }
+
+         /* copy repeated sequence */
+         if (unlikely((op-ref)<(int)STEPSIZE))
+         {
+             const size_t dec64 = dec64table[(sizeof(void*)==4) ? 0 : op-ref];
+             op[0] = ref[0];
+             op[1] = ref[1];
+             op[2] = ref[2];
+             op[3] = ref[3];
+             /*op += 4, ref += 4; ref -= dec32table[op-ref];
+             A32(op) = A32(ref);
+             op += STEPSIZE-4; ref -= dec64;*/
+             ref += dec32table[op-ref];
+             A32(op+4) = A32(ref);
+             op += STEPSIZE; ref -= dec64;
+         } else { LZ4_COPYSTEP(op,ref); }
+         cpy = op + length - (STEPSIZE-4);
+
+         if (unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4)))
+         {
+             if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last 5 bytes must be literals */
+             LZ4_SECURECOPY(op, ref, (oend-COPYLENGTH));
+             while(op<cpy) *op++=*ref++;
+             op=cpy;
+             continue;
+         }
+         LZ4_WILDCOPY(op, ref, cpy);
+         op=cpy;   /* correction */
+     }
+
+     /* end of decoding */
+     if (endOnInput)
+         return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
+     else
+         return (int) (((char*)ip)-source);   /* Nb of input bytes read */
+
+     /* Overflow error detected */
+ _output_error:
+     return (int) (-(((char*)ip)-source))-1;
+ }
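To make the run-length handling above concrete, here is a small, self-contained illustration (the helper name is mine, not part of lz4.c): each sequence starts with a token whose high 4 bits give the literal length and low 4 bits the match length, and a field equal to its 4-bit mask (15) is extended by continuation bytes, each value of 255 pulling in another byte.

  #include <assert.h>

  /* Decode one length field the way LZ4_decompress_generic does:
     start with the 4-bit value; if it equals the mask (15), keep
     adding continuation bytes until one is not 255. */
  static unsigned decode_length(unsigned field, const unsigned char** p)
  {
      unsigned length = field;
      if (field == 15) {   /* RUN_MASK / ML_MASK */
          unsigned s;
          do { s = *(*p)++; length += s; } while (s == 255);
      }
      return length;
  }

  int main(void)
  {
      /* token 0xF2: literal field = 15 (extended), match field = 2 */
      const unsigned char extra[] = { 255, 3 };   /* 15 + 255 + 3 = 273 literals */
      const unsigned char* p = extra;
      assert(decode_length(0xF2 >> 4, &p) == 273);
      p = extra;
      assert(decode_length(0xF2 & 0x0F, &p) == 2);   /* + MINMATCH => 6-byte match */
      return 0;
  }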
+
+
+ int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
+ {
+     return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
+ }
+
+ int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
+ {
+     return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
+ }
+
+ int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
+ {
+     return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
+ }
+
+ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
+ {
+     return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+ }
+
+ int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
+ {
+ #ifdef _MSC_VER   /* This version is faster with Visual */
+     return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
+ #else
+     return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+ #endif
+ }
+
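A hedged round-trip sketch tying the two halves together (buffer sizes are illustrative): compress with LZ4_compress(), then recover the original with LZ4_decompress_safe(), which bounds both its reads and its writes and returns a negative value on malformed input.

  #include <assert.h>
  #include <string.h>
  #include "lz4.h"

  int main(void)
  {
      const char src[] = "abcabcabcabcabcabcabcabc";
      char compressed[128];          /* comfortably above the worst-case bound here */
      char restored[sizeof(src)];

      int csize = LZ4_compress(src, compressed, (int)sizeof(src));
      assert(csize > 0);

      /* LZ4_decompress_safe never reads past inputSize nor writes past
         maxOutputSize; a negative return flags corrupted input. */
      int dsize = LZ4_decompress_safe(compressed, restored, csize, (int)sizeof(restored));
      assert(dsize == (int)sizeof(src));
      assert(memcmp(src, restored, sizeof(src)) == 0);
      return 0;
  }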