lz4-ruby 0.3.2-x86-mingw32 → 0.3.3-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.rdoc +8 -0
- data/VERSION +1 -1
- data/ext/lz4ruby/lz4.c +585 -215
- data/ext/lz4ruby/lz4.h +302 -247
- data/ext/lz4ruby/lz4hc.c +4 -2
- data/ext/lz4ruby/lz4hc.h +173 -172
- data/ext/lz4ruby/lz4ruby.c +1 -1
- data/lib/1.8/lz4ruby.so +0 -0
- data/lib/1.9/lz4ruby.so +0 -0
- metadata +2 -2
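The bulk of this release is a refresh of the bundled LZ4 C sources, which brings in the then-experimental streaming API (LZ4_createStream, LZ4_loadDict, LZ4_compress_continue, LZ4_saveDict, LZ4_free) and the external-dictionary compression/decompression paths visible in the lz4.c diff below. For orientation, here is a minimal, hypothetical C caller of that streaming compression API; the function signatures come from the diff itself, but the header name, chunk size, framing and the caller are illustrative assumptions, not anything defined by this gem.

/* Sketch only: compress two buffers as a dependent block sequence using the
 * streaming functions added in this lz4 update. Signatures match the lz4.c
 * diff below; "lz4.h", CHUNK and the surrounding logic are assumptions. */
#include <stdio.h>
#include <string.h>
#include "lz4.h"

#define CHUNK 4096

int compress_two_chunks(const char *a, const char *b, FILE *out)
{
    static char dict[64 * 1024];            /* holds the history between blocks */
    char dst[LZ4_COMPRESSBOUND(CHUNK)];
    const char *chunks[2] = { a, b };
    void *stream = LZ4_createStream();      /* zero-initialised stream state */
    int i;

    if (stream == NULL) return -1;
    for (i = 0; i < 2; i++)
    {
        int isize = (int)strlen(chunks[i]);
        if (isize > CHUNK) { LZ4_free(stream); return -1; }
        /* each block may reference data from the previous one via the stream state */
        int csize = LZ4_compress_continue(stream, chunks[i], dst, isize);
        if (csize <= 0) { LZ4_free(stream); return -1; }
        fwrite(dst, 1, (size_t)csize, out);
        /* copy the last 64 KB of history somewhere safe before the caller
         * reuses or frees its input buffer */
        LZ4_saveDict(stream, dict, (int)sizeof(dict));
    }
    LZ4_free(stream);
    return 0;
}

Without the LZ4_saveDict call, previously compressed input would have to stay valid at its original address, because the stream state keeps referencing it; the LZ4_loadDict and LZ4_compress_limitedOutput_continue entry points in the diff follow the same pattern.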
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
-SHA1:
-  data.tar.gz: ae57034ee26f014d90674e7123ce29aaa9c6f203
-  metadata.gz: 24241cb6aae911d4417cf25f9da0244850cacab9
 SHA512:
-  data.tar.gz:
-  metadata.gz:
+  data.tar.gz: bb5a465b1602a58ee09e3688ba71c69654e0b388140562168354233455f567856e7f96070eb6701c1b810c9c76838e4ec1ba44b22e97939e31ddacc8872d95bd
+  metadata.gz: b942bfafc62c2e8bda87ba04370a4418334b00f8850ef906acd6d044d72a7a38f397aeb3b4263563bea39b29709a5c970c34f7f34d01d3bc2a2eb9dd196442cb
+SHA1:
+  data.tar.gz: 6012e6042e67467e33076acf17a8ca1ed4b15c40
+  metadata.gz: f1e812142bac7e870139353e38ee8440df15e0a9

data/CHANGELOG.rdoc CHANGED

data/VERSION CHANGED

@@ -1 +1 @@
-0.3.2
+0.3.3

data/ext/lz4ruby/lz4.c CHANGED

@@ -34,15 +34,6 @@
 /**************************************
    Tuning parameters
 **************************************/
-/*
- * MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
-#define MEMORY_USAGE 14
-
 /*
  * HEAPMODE :
  * Select how default compression functions will allocate memory for their hash table,
@@ -56,8 +47,9 @@
 **************************************/
 /* 32 or 64 bits ? */
 #if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
-  || defined(__powerpc64__) || defined(
-  || defined(
+  || defined(__powerpc64__) || defined(__powerpc64le__) \
+  || defined(__ppc64__) || defined(__ppc64le__) \
+  || defined(__PPC64__) || defined(__PPC64LE__) \
   || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) /* Detects 64 bits mode */
 # define LZ4_ARCH64 1
 #else
@@ -68,6 +60,7 @@
  * Little Endian or Big Endian ?
  * Overwrite the #define below if you know your architecture endianess
  */
+#include <stdlib.h> /* Apparently required to detect endianess */
 #if defined (__GLIBC__)
 # include <endian.h>
 # if (__BYTE_ORDER == __BIG_ENDIAN)
@@ -224,9 +217,9 @@ typedef struct {size_t v;} _PACKED size_t_S;
 /**************************************
    Constants
 **************************************/
-#define LZ4_HASHLOG (
-#define HASHTABLESIZE (1 <<
-#define
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)

 #define MINMATCH 4

@@ -255,16 +248,19 @@ static const int LZ4_minLength = (MFLIMIT+1);
    Structures and local types
 **************************************/
 typedef struct {
-    U32
+    U32 hashTable[HASH_SIZE_U32];
+    U32 currentOffset;
+    U32 initCheck;
+    const BYTE* dictionary;
     const BYTE* bufferStart;
-
-
-} LZ4_Data_Structure;
+    U32 dictSize;
+} LZ4_stream_t_internal;

-typedef enum { notLimited = 0,
+typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
 typedef enum { byPtr, byU32, byU16 } tableType_t;

-typedef enum {
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
 typedef enum { full = 0, partial = 1 } earlyEnd_directive;
@@ -289,12 +285,12 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
 /**************************************
    Macros
 **************************************/
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
 #if LZ4_ARCH64 || !defined(__GNUC__)
-# define LZ4_WILDCOPY(d,s,e)
+# define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
 #else
-# define LZ4_WILDCOPY(d,s,e)
+# define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
 #endif
-#define LZ4_SECURECOPY(d,s,e) { if (d<e) LZ4_WILDCOPY(d,s,e); }


 /****************************
@@ -302,7 +298,7 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
 ****************************/
 #if LZ4_ARCH64

-
+int LZ4_NbCommonBytes (register U64 val)
 {
 # if defined(LZ4_BIG_ENDIAN)
 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -334,7 +330,7 @@ FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)

 #else

-
+int LZ4_NbCommonBytes (register U32 val)
 {
 # if defined(LZ4_BIG_ENDIAN)
 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -366,10 +362,12 @@ FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
 #endif


-
+/********************************
    Compression functions
-
-
+********************************/
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+
+static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
 {
     if (tableType == byU16)
         return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
@@ -377,9 +375,9 @@ FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
     return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
 }

-
+static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }

-
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     switch (tableType)
     {
@@ -389,58 +387,97 @@ FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, t
     }
 }

-
+static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     U32 h = LZ4_hashPosition(p, tableType);
     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
 }

-
+static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
     if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
     { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
 }

-
+static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     U32 h = LZ4_hashPosition(p, tableType);
     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
 }

+static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
+{
+    const BYTE* const pStart = pIn;

-
+    while (likely(pIn<pInLimit-(STEPSIZE-1)))
+    {
+        size_t diff = AARCH(pRef) ^ AARCH(pIn);
+        if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
+        pIn += LZ4_NbCommonBytes(diff);
+        return (unsigned)(pIn - pStart);
+    }
+    if (sizeof(void*)==8) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
+    if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
+    if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;
+
+    return (unsigned)(pIn - pStart);
+}
+
+
+static int LZ4_compress_generic(
     void* ctx,
     const char* source,
     char* dest,
     int inputSize,
     int maxOutputSize,

-    limitedOutput_directive
+    limitedOutput_directive outputLimited,
     tableType_t tableType,
-
+    dict_directive dict,
+    dictIssue_directive dictIssue)
 {
+    LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;
+
     const BYTE* ip = (const BYTE*) source;
-    const BYTE*
-    const BYTE*
+    const BYTE* base;
+    const BYTE* lowLimit;
+    const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
+    const BYTE* const dictionary = dictPtr->dictionary;
+    const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
+    const size_t dictDelta = dictEnd - (const BYTE*)source;
     const BYTE* anchor = (const BYTE*) source;
     const BYTE* const iend = ip + inputSize;
     const BYTE* const mflimit = iend - MFLIMIT;
     const BYTE* const matchlimit = iend - LASTLITERALS;

     BYTE* op = (BYTE*) dest;
-    BYTE* const
+    BYTE* const olimit = op + maxOutputSize;

-    int length;
     const int skipStrength = SKIPSTRENGTH;
     U32 forwardH;
+    size_t refDelta=0;

     /* Init conditions */
-    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;
-
-
-
-
+    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+    switch(dict)
+    {
+    case noDict:
+    default:
+        base = (const BYTE*)source;
+        lowLimit = (const BYTE*)source;
+        break;
+    case withPrefix64k:
+        base = (const BYTE*)source - dictPtr->currentOffset;
+        lowLimit = (const BYTE*)source - dictPtr->dictSize;
+        break;
+    case usingExtDict:
+        base = (const BYTE*)source - dictPtr->currentOffset;
+        lowLimit = (const BYTE*)source;
+        break;
+    }
+    if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
+    if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */

     /* First Byte */
     LZ4_putPosition(ip, ctx, tableType, base);
@@ -449,98 +486,148 @@ FORCE_INLINE int LZ4_compress_generic(
     /* Main Loop */
     for ( ; ; )
     {
-        int findMatchAttempts = (1U << skipStrength) + 3;
-        const BYTE* forwardIp = ip;
         const BYTE* ref;
         BYTE* token;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        {
+            const BYTE* forwardIp = ip;
+            unsigned step=1;
+            unsigned searchMatchNb = (1U << skipStrength);
+
+            /* Find a match */
+            do {
+                U32 h = forwardH;
+                ip = forwardIp;
+                forwardIp += step;
+                step = searchMatchNb++ >> skipStrength;
+                //if (step>8) step=8; // required for valid forwardIp ; slows down uncompressible data a bit
+
+                if (unlikely(forwardIp > mflimit)) goto _last_literals;
+
+                ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
+                if (dict==usingExtDict)
+                {
+                    if (ref<(const BYTE*)source)
+                    {
+                        refDelta = dictDelta;
+                        lowLimit = dictionary;
+                    }
+                    else
+                    {
+                        refDelta = 0;
+                        lowLimit = (const BYTE*)source;
+                    }
+                }
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
+
+            } while ( ((dictIssue==dictSmall) ? (ref < lowRefLimit) : 0)
+                || ((tableType==byU16) ? 0 : (ref + MAX_DISTANCE < ip))
+                || (A32(ref+refDelta) != A32(ip)) );
+        }

         /* Catch up */
-        while ((ip>anchor) && (ref > lowLimit) && (unlikely(ip[-1]==ref[-1]))) { ip--; ref--; }
+        while ((ip>anchor) && (ref+refDelta > lowLimit) && (unlikely(ip[-1]==ref[refDelta-1]))) { ip--; ref--; }

-        /* Encode Literal length */
-        length = (int)(ip - anchor);
-        token = op++;
-        if ((limitedOutput) && (unlikely(op + length + (2 + 1 + LASTLITERALS) + (length/255) > oend))) return 0; /* Check output limit */
-        if (length>=(int)RUN_MASK)
        {
-
-
-
-
-
-
+            /* Encode Literal length */
+            unsigned litLength = (unsigned)(ip - anchor);
+            token = op++;
+            if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
+                return 0; /* Check output limit */
+            if (litLength>=RUN_MASK)
+            {
+                int len = (int)litLength-RUN_MASK;
+                *token=(RUN_MASK<<ML_BITS);
+                for(; len >= 255 ; len-=255) *op++ = 255;
+                *op++ = (BYTE)len;
+            }
+            else *token = (BYTE)(litLength<<ML_BITS);

-
-
+            /* Copy Literals */
+            { BYTE* end = op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
+        }

_next_match:
        /* Encode Offset */
-        LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
-
-        /* Start Counting */
-        ip+=MINMATCH; ref+=MINMATCH; /* MinMatch already verified */
-        anchor = ip;
-        while (likely(ip<matchlimit-(STEPSIZE-1)))
-        {
-            size_t diff = AARCH(ref) ^ AARCH(ip);
-            if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
-            ip += LZ4_NbCommonBytes(diff);
-            goto _endCount;
-        }
-        if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
-        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-        if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
+        LZ4_WRITE_LITTLEENDIAN_16(op, (U16)(ip-ref));

        /* Encode MatchLength */
-        length = (int)(ip - anchor);
-        if ((limitedOutput) && (unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend))) return 0; /* Check output limit */
-        if (length>=(int)ML_MASK)
        {
-
-
-
-
-
+            unsigned matchLength;
+
+            if ((dict==usingExtDict) && (lowLimit==dictionary))
+            {
+                const BYTE* limit;
+                ref += refDelta;
+                limit = ip + (dictEnd-ref);
+                if (limit > matchlimit) limit = matchlimit;
+                matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, limit);
+                ip += MINMATCH + matchLength;
+                if (ip==limit)
+                {
+                    unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+                    matchLength += more;
+                    ip += more;
+                }
+            }
+            else
+            {
+                matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, matchlimit);
+                ip += MINMATCH + matchLength;
+            }
+
+            if (matchLength>=ML_MASK)
+            {
+                if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
+                    return 0; /* Check output limit */
+                *token += ML_MASK;
+                matchLength -= ML_MASK;
+                for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
+                if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
+                *op++ = (BYTE)matchLength;
+            }
+            else *token += (BYTE)(matchLength);
        }
-
+
+        anchor = ip;

        /* Test end of chunk */
-        if (ip > mflimit)
+        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx, tableType, base);

        /* Test next position */
        ref = LZ4_getPosition(ip, ctx, tableType, base);
+        if (dict==usingExtDict)
+        {
+            if (ref<(const BYTE*)source)
+            {
+                refDelta = dictDelta;
+                lowLimit = dictionary;
+            }
+            else
+            {
+                refDelta = 0;
+                lowLimit = (const BYTE*)source;
+            }
+        }
        LZ4_putPosition(ip, ctx, tableType, base);
-        if ((
+        if ( ((dictIssue==dictSmall) ? (ref>=lowRefLimit) : 1)
+            && (ref+MAX_DISTANCE>=ip)
+            && (A32(ref+refDelta)==A32(ip)) )
+            { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
-
-        forwardH = LZ4_hashPosition(ip, tableType);
+        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        int lastRun = (int)(iend - anchor);
-        if ((
+        if ((outputLimited) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
+            return 0; /* Check output limit */
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (BYTE)(lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
@@ -555,16 +642,16 @@ _last_literals:
 int LZ4_compress(const char* source, char* dest, int inputSize)
 {
 #if (HEAPMODE)
-    void* ctx = ALLOCATOR(
+    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
 #else
-    U32 ctx[
+    U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
 #endif
    int result;

    if (inputSize < (int)LZ4_64KLIMIT)
-        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16,
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
    else
-        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr,
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);

 #if (HEAPMODE)
    FREEMEM(ctx);
@@ -575,16 +662,16 @@ int LZ4_compress(const char* source, char* dest, int inputSize)
 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
 {
 #if (HEAPMODE)
-    void* ctx = ALLOCATOR(
+    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
 #else
-    U32 ctx[
+    U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
 #endif
    int result;

    if (inputSize < (int)LZ4_64KLIMIT)
-        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize,
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
    else
-        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize,
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);

 #if (HEAPMODE)
    FREEMEM(ctx);
@@ -593,123 +680,182 @@ int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, in
 }


-
-
-
+/*****************************************
+   Experimental : Streaming functions
+*****************************************/

-
-
-
-int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
+void* LZ4_createStream()
 {
-
-    MEM_INIT(
+    void* lz4s = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
+    MEM_INIT(lz4s, 0, LZ4_STREAMSIZE);
+    return lz4s;
+}

-
-
-
-
+int LZ4_free (void* LZ4_stream)
+{
+    FREEMEM(LZ4_stream);
+    return (0);
 }


-int
+int LZ4_loadDict (void* LZ4_dict, const char* dictionary, int dictSize)
 {
-
-
+    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
+    const BYTE* p = (const BYTE*)dictionary;
+    const BYTE* const dictEnd = p + dictSize;
+    const BYTE* base;

-
-
-    else
-        return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
-}
+    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+    if (dict->initCheck) MEM_INIT(dict, 0, sizeof(LZ4_stream_t_internal)); /* Uninitialized structure detected */

+    if (dictSize < MINMATCH)
+    {
+        dict->dictionary = NULL;
+        dict->dictSize = 0;
+        return 1;
+    }

-
-
-
+    if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB;
+    base = p - dict->currentOffset;
+    dict->dictionary = p;
+    dict->dictSize = (U32)(dictEnd - p);
+    dict->currentOffset += dict->dictSize;

-
-    {
-
-
+    while (p <= dictEnd-MINMATCH)
+    {
+        LZ4_putPosition(p, dict, byU32, base);
+        p+=3;
+    }

-
-{
-    MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
-    lz4ds->bufferStart = base;
-    lz4ds->base = base;
-    lz4ds->nextBlock = base;
+    return 1;
 }

-int LZ4_resetStreamState(void* state, const char* inputBuffer)
-{
-    if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
-    LZ4_init((LZ4_Data_Structure*)state, (const BYTE*)inputBuffer);
-    return 0;
-}

-void*
+void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
 {
-
-
-
+    if ((LZ4_dict->currentOffset > 0x80000000) ||
+        ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */
+    {
+        /* rescale hash table */
+        U32 delta = LZ4_dict->currentOffset - 64 KB;
+        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+        int i;
+        for (i=0; i<HASH_SIZE_U32; i++)
+        {
+            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+            else LZ4_dict->hashTable[i] -= delta;
+        }
+        LZ4_dict->currentOffset = 64 KB;
+        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+    }
 }


-int
+FORCE_INLINE int LZ4_compress_continue_generic (void* LZ4_stream, const char* source, char* dest, int inputSize,
+    int maxOutputSize, limitedOutput_directive limit)
 {
-
-
-}
+    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
+    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

+    const BYTE* smallest = (const BYTE*) source;
+    if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
+    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
+    LZ4_renormDictT(streamPtr, smallest);

-
-{
-    LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
-    size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);
-
-    if ( (lz4ds->base - delta > lz4ds->base) /* underflow control */
-        || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) ) /* close to 32-bits limit */
+    /* Check overlapping input/dictionary space */
    {
-
-
-
-        for (nH=0; nH < HASHNBCELLS4; nH++)
+        const BYTE* sourceEnd = (const BYTE*) source + inputSize;
+        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
        {
-
-
+            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
-        memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
-        lz4ds->base = lz4ds->bufferStart;
-        lz4ds->nextBlock = lz4ds->base + 64 KB;
    }
-
+
+    /* prefix mode : source data follows dictionary */
+    if (dictEnd == (const BYTE*)source)
+    {
+        int result;
+        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
+        else
+            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
+        streamPtr->dictSize += (U32)inputSize;
+        streamPtr->currentOffset += (U32)inputSize;
+        return result;
+    }
+
+    /* external dictionary mode */
    {
-
-
-
+        int result;
+        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
+        else
+            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
+        streamPtr->dictionary = (const BYTE*)source;
+        streamPtr->dictSize = (U32)inputSize;
+        streamPtr->currentOffset += (U32)inputSize;
+        return result;
    }
+}
+
+
+int LZ4_compress_continue (void* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
+}

-
+int LZ4_compress_limitedOutput_continue (void* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
 }


-
+// Hidden debug function, to force separate dictionary mode
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
 {
-
+    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
+    int result;
+    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+    const BYTE* smallest = dictEnd;
+    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
+    LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);
+
+    result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);
+
+    streamPtr->dictionary = (const BYTE*)source;
+    streamPtr->dictSize = (U32)inputSize;
+    streamPtr->currentOffset += (U32)inputSize;
+
+    return result;
 }


-int
+int LZ4_saveDict (void* LZ4_dict, char* safeBuffer, int dictSize)
 {
-
+    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
+    const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;
+
+    if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
+    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
+
+    memcpy(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+    dict->dictionary = (const BYTE*)safeBuffer;
+    dict->dictSize = (U32)dictSize;
+
+    return 1;
 }


+
 /****************************
    Decompression functions
 ****************************/
-
 /*
 * This generic decompression function cover all use cases.
 * It shall be instanciated several times, using different sets of directives
@@ -723,9 +869,11 @@ FORCE_INLINE int LZ4_decompress_generic(
    int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

    int endOnInput, /* endOnOutputSize, endOnInputSize */
-    int prefix64k, /* noPrefix, withPrefix */
    int partialDecoding, /* full, partial */
-    int targetOutputSize
+    int targetOutputSize, /* only used if partialDecoding==partial */
+    int dict, /* noDict, withPrefix64k, usingExtDict */
+    const char* dictStart, /* only if dict==usingExtDict */
+    int dictSize /* note : = 0 if noDict */
    )
 {
    /* Local Variables */
@@ -737,11 +885,19 @@ FORCE_INLINE int LZ4_decompress_generic(
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
+    const BYTE* const lowLimit = (const BYTE*)dest - dictSize;

-
+    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
+//#define OLD
+#ifdef OLD
+    const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
+#else
    const size_t dec32table[] = {4-0, 4-3, 4-2, 4-3, 4-0, 4-0, 4-0, 4-0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
+#endif
    static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};

+    const int checkOffset = (endOnInput) && (dictSize < (int)(64 KB));
+

    /* Special cases */
    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
@@ -759,12 +915,16 @@ FORCE_INLINE int LZ4_decompress_generic(
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
-            unsigned s
-
+            unsigned s;
+            do
            {
                s = *ip++;
                length += s;
            }
+            while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
+            //if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
+            if ((sizeof(void*)==4) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* quickfix issue 134 */
+            if ((endOnInput) && (sizeof(void*)==4) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* quickfix issue 134 */
        }

        /* copy literals */
@@ -791,18 +951,52 @@ FORCE_INLINE int LZ4_decompress_generic(

        /* get offset */
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
-        if ((
+        if ((checkOffset) && (unlikely(ref < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */

        /* get matchlength */
        if ((length=(token&ML_MASK)) == ML_MASK)
        {
-
+            unsigned s;
+            do
            {
-
+                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
+                s = *ip++;
                length += s;
-
-
+            } while (s==255);
+            //if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
+            if ((sizeof(void*)==4) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* quickfix issue 134 */
+        }
+
+        /* check external dictionary */
+        if ((dict==usingExtDict) && (ref < (BYTE* const)dest))
+        {
+            if (unlikely(op+length+MINMATCH > oend-LASTLITERALS)) goto _output_error;
+
+            if (length+MINMATCH <= (size_t)(dest-(char*)ref))
+            {
+                ref = dictEnd - (dest-(char*)ref);
+                memcpy(op, ref, length+MINMATCH);
+                op += length+MINMATCH;
            }
+            else
+            {
+                size_t copySize = (size_t)(dest-(char*)ref);
+                memcpy(op, dictEnd - copySize, copySize);
+                op += copySize;
+                copySize = length+MINMATCH - copySize;
+                if (copySize > (size_t)((char*)op-dest)) /* overlap */
+                {
+                    BYTE* const cpy = op + copySize;
+                    const BYTE* ref = (BYTE*)dest;
+                    while (op < cpy) *op++ = *ref++;
+                }
+                else
+                {
+                    memcpy(op, dest, copySize);
+                    op += copySize;
+                }
+            }
+            continue;
        }

        /* copy repeated sequence */
@@ -813,19 +1007,22 @@ FORCE_INLINE int LZ4_decompress_generic(
            op[1] = ref[1];
            op[2] = ref[2];
            op[3] = ref[3];
-
+#ifdef OLD
+            op += 4, ref += 4; ref -= dec32table[op-ref];
            A32(op) = A32(ref);
-            op += STEPSIZE-4; ref -= dec64
+            op += STEPSIZE-4; ref -= dec64;
+#else
            ref += dec32table[op-ref];
            A32(op+4) = A32(ref);
            op += STEPSIZE; ref -= dec64;
+#endif
        } else { LZ4_COPYSTEP(op,ref); }
        cpy = op + length - (STEPSIZE-4);

        if (unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4)))
        {
            if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last 5 bytes must be literals */
-
+            if (op<oend-COPYLENGTH) LZ4_WILDCOPY(op, ref, (oend-COPYLENGTH));
            while(op<cpy) *op++=*ref++;
            op=cpy;
            continue;
@@ -846,32 +1043,205 @@ _output_error:
 }


-int LZ4_decompress_safe(const char* source, char* dest, int
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxOutputSize)
 {
-    return LZ4_decompress_generic(source, dest,
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, noDict, NULL, 0);
 }

-int
+int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxOutputSize)
 {
-    return LZ4_decompress_generic(source, dest,
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, partial, targetOutputSize, noDict, NULL, 0);
 }

-int
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
 {
-    return LZ4_decompress_generic(source, dest,
+    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, NULL, 0);
 }

-
+/* streaming decompression functions */
+
+//#define LZ4_STREAMDECODESIZE_U32 4
+//#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U32 * sizeof(unsigned int))
+//typedef struct { unsigned int table[LZ4_STREAMDECODESIZE_U32]; } LZ4_streamDecode_t;
+typedef struct
 {
-
+    const char* dictionary;
+    int dictSize;
+} LZ4_streamDecode_t_internal;
+
+/*
+ * If you prefer dynamic allocation methods,
+ * LZ4_createStreamDecode()
+ * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
+ */
+void* LZ4_createStreamDecode()
+{
+    void* lz4s = ALLOCATOR(sizeof(U32), LZ4_STREAMDECODESIZE_U32);
+    MEM_INIT(lz4s, 0, LZ4_STREAMDECODESIZE);
+    return lz4s;
 }

-
+/*
+ * LZ4_setDictDecode
+ * Use this function to instruct where to find the dictionary
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * Return : 1 if OK, 0 if error
+ */
+int LZ4_setDictDecode (void* LZ4_streamDecode, const char* dictionary, int dictSize)
 {
-
-
-
-    return
-#endif
+    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+    lz4sd->dictionary = dictionary;
+    lz4sd->dictSize = dictSize;
+    return 1;
 }

+/*
+ *_continue() :
+    These decoding functions allow decompression of multiple blocks in "streaming" mode.
+    Previously decoded blocks must still be available at the memory position where they were decoded.
+    If it's not possible, save the relevant part of decoded data into a safe buffer,
+    and indicate where it stands using LZ4_setDictDecode()
+ */
+int LZ4_decompress_safe_continue (void* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+    int result;
+
+    result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
+    if (result <= 0) return result;
+    if (lz4sd->dictionary + lz4sd->dictSize == dest)
+    {
+        lz4sd->dictSize += result;
+    }
+    else
+    {
+        lz4sd->dictionary = dest;
+        lz4sd->dictSize = result;
+    }
+
+    return result;
+}
+
+int LZ4_decompress_fast_continue (void* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+{
+    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+    int result;
+
+    result = LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
+    if (result <= 0) return result;
+    if (lz4sd->dictionary + lz4sd->dictSize == dest)
+    {
+        lz4sd->dictSize += result;
+    }
+    else
+    {
+        lz4sd->dictionary = dest;
+        lz4sd->dictSize = result;
+    }
+
+    return result;
+}
+
+
+/*
+ Advanced decoding functions :
+ *_usingDict() :
+    These decoding functions work the same as "_continue" ones,
+    the dictionary must be explicitly provided within parameters
+ */
+
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, dictStart, dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, dictStart, dictSize);
+}
+
+
+/***************************************************
+   Obsolete Functions
+***************************************************/
+/*
+ These function names are deprecated and should no longer be used.
+ They are only provided here for compatibility with older user programs.
+ - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+ - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+ */
+int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
+
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
+
+void LZ4_init(LZ4_stream_t_internal* lz4ds, const BYTE* base)
+{
+    MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
+    lz4ds->bufferStart = base;
+}
+
+int LZ4_resetStreamState(void* state, const char* inputBuffer)
+{
+    if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
+    LZ4_init((LZ4_stream_t_internal*)state, (const BYTE*)inputBuffer);
+    return 0;
+}
+
+void* LZ4_create (const char* inputBuffer)
+{
+    void* lz4ds = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
+    LZ4_init ((LZ4_stream_t_internal*)lz4ds, (const BYTE*)inputBuffer);
+    return lz4ds;
+}
+
+char* LZ4_slideInputBuffer (void* LZ4_Data)
+{
+    LZ4_stream_t_internal* lz4ds = (LZ4_stream_t_internal*)LZ4_Data;
+
+    LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)lz4ds->bufferStart, 64 KB);
+
+    return (char*)(lz4ds->bufferStart + 64 KB);
+}
+
+/* Obsolete compresson functions using User-allocated state */
+
+int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+
+int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
+{
+    if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
+    MEM_INIT(state, 0, LZ4_STREAMSIZE);
+
+    if (inputSize < (int)LZ4_64KLIMIT)
+        return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
+    else
+        return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
+}
+
+int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
+    MEM_INIT(state, 0, LZ4_STREAMSIZE);
+
+    if (inputSize < (int)LZ4_64KLIMIT)
+        return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
+    else
+        return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
+}
+
+/* Obsolete streaming decompression functions */
+
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, NULL, 64 KB);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, NULL, 64 KB);
+}