extlz4 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. checksums.yaml +4 -4
  2. data/README.md +9 -4
  3. data/bin/extlz4 +1 -1
  4. data/contrib/lz4/NEWS +36 -0
  5. data/contrib/lz4/README.md +11 -12
  6. data/contrib/lz4/build/README.md +55 -0
  7. data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +169 -0
  8. data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +176 -0
  9. data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +180 -0
  10. data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +176 -0
  11. data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +173 -0
  12. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +51 -0
  13. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +179 -0
  14. data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +175 -0
  15. data/contrib/lz4/build/VS2010/lz4.sln +98 -0
  16. data/contrib/lz4/build/VS2010/lz4/lz4.rc +51 -0
  17. data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +189 -0
  18. data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +173 -0
  19. data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +180 -0
  20. data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +184 -0
  21. data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +180 -0
  22. data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +177 -0
  23. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +51 -0
  24. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +183 -0
  25. data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +179 -0
  26. data/contrib/lz4/build/VS2017/lz4.sln +103 -0
  27. data/contrib/lz4/build/VS2017/lz4/lz4.rc +51 -0
  28. data/contrib/lz4/build/VS2017/lz4/lz4.vcxproj +164 -0
  29. data/contrib/lz4/build/cmake/CMakeLists.txt +235 -0
  30. data/contrib/lz4/lib/README.md +27 -10
  31. data/contrib/lz4/lib/lz4.c +327 -230
  32. data/contrib/lz4/lib/lz4.h +80 -70
  33. data/contrib/lz4/lib/lz4frame.c +93 -54
  34. data/contrib/lz4/lib/lz4frame.h +22 -14
  35. data/contrib/lz4/lib/lz4hc.c +192 -115
  36. data/contrib/lz4/lib/lz4hc.h +15 -40
  37. data/contrib/lz4/ossfuzz/Makefile +12 -8
  38. data/contrib/lz4/ossfuzz/compress_frame_fuzzer.c +11 -5
  39. data/contrib/lz4/ossfuzz/compress_fuzzer.c +9 -2
  40. data/contrib/lz4/ossfuzz/compress_hc_fuzzer.c +10 -3
  41. data/contrib/lz4/ossfuzz/decompress_frame_fuzzer.c +11 -3
  42. data/contrib/lz4/ossfuzz/decompress_fuzzer.c +6 -2
  43. data/contrib/lz4/ossfuzz/fuzz_data_producer.c +77 -0
  44. data/contrib/lz4/ossfuzz/fuzz_data_producer.h +36 -0
  45. data/contrib/lz4/ossfuzz/round_trip_frame_fuzzer.c +8 -4
  46. data/contrib/lz4/ossfuzz/round_trip_fuzzer.c +9 -2
  47. data/contrib/lz4/ossfuzz/round_trip_hc_fuzzer.c +7 -2
  48. data/contrib/lz4/ossfuzz/travisoss.sh +6 -1
  49. data/contrib/lz4/tmp +0 -0
  50. data/contrib/lz4/tmpsparse +0 -0
  51. data/ext/extlz4.c +2 -0
  52. data/ext/extlz4.h +5 -0
  53. data/ext/hashargs.c +1 -1
  54. data/ext/hashargs.h +1 -1
  55. data/gemstub.rb +3 -14
  56. data/lib/extlz4.rb +0 -2
  57. data/lib/extlz4/oldstream.rb +1 -1
  58. metadata +40 -25
  59. data/lib/extlz4/version.rb +0 -3
data/contrib/lz4/lib/lz4frame.h
@@ -66,17 +66,22 @@ extern "C" {
  *****************************************************************/
 /* LZ4_DLL_EXPORT :
  * Enable exporting of functions when building a Windows DLL
- * LZ4FLIB_API :
+ * LZ4FLIB_VISIBILITY :
  * Control library symbols visibility.
  */
+#ifndef LZ4FLIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define LZ4FLIB_VISIBILITY
+#  endif
+#endif
 #if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-#  define LZ4FLIB_API __declspec(dllexport)
+#  define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
 #elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-#  define LZ4FLIB_API __declspec(dllimport)
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-#  define LZ4FLIB_API __attribute__ ((__visibility__ ("default")))
+#  define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
 #else
-#  define LZ4FLIB_API
+#  define LZ4FLIB_API LZ4FLIB_VISIBILITY
 #endif

 #ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
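
Note: this hunk splits symbol visibility (a GCC/ELF concern) out of LZ4FLIB_API so that -fvisibility=hidden builds and Windows DLL builds compose; previously, GCC visibility was applied only when neither DLL macro was set. A minimal sketch of the same idiom, with hypothetical MYLIB_* names standing in for the real lz4frame.h macros:

    /* Hypothetical export header illustrating the dual export/visibility pattern. */
    #ifndef MYLIB_VISIBILITY
    #  if defined(__GNUC__) && (__GNUC__ >= 4)
    #    define MYLIB_VISIBILITY __attribute__ ((visibility ("default")))
    #  else
    #    define MYLIB_VISIBILITY   /* no-op on compilers without GCC visibility */
    #  endif
    #endif
    #if defined(MYLIB_DLL_EXPORT) && (MYLIB_DLL_EXPORT==1)
    #  define MYLIB_API __declspec(dllexport) MYLIB_VISIBILITY
    #elif defined(MYLIB_DLL_IMPORT) && (MYLIB_DLL_IMPORT==1)
    #  define MYLIB_API __declspec(dllimport) MYLIB_VISIBILITY
    #else
    #  define MYLIB_API MYLIB_VISIBILITY
    #endif

    MYLIB_API int mylib_version(void);  /* stays exported when the rest is hidden */
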
@@ -103,7 +108,7 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code);   /**< return

 /*-************************************
  *  Frame compression types
- **************************************/
+ ************************************* */
 /* #define LZ4F_ENABLE_OBSOLETE_ENUMS   // uncomment to enable obsolete enums */
 #ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
 #  define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
@@ -113,7 +118,8 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code);   /**< return

 /* The larger the block size, the (slightly) better the compression ratio,
  * though there are diminishing returns.
- * Larger blocks also increase memory usage on both compression and decompression sides. */
+ * Larger blocks also increase memory usage on both compression and decompression sides.
+ */
 typedef enum {
     LZ4F_default=0,
     LZ4F_max64KB=4,
@@ -284,7 +290,7 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
  * @return is always the same for a srcSize and prefsPtr.
  *  prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
  *  tech details :
- * @return includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
  *  It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
  * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
  */
@@ -376,7 +382,7 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
  *  note : Frame header size is variable, but is guaranteed to be
  *         >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
  */
-size_t LZ4F_headerSize(const void* src, size_t srcSize);
+LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);

 /*! LZ4F_getFrameInfo() :
  *  This function extracts frame parameters (max blockSize, dictID, etc.).
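
Note: LZ4F_headerSize() existed before this release but, lacking the LZ4FLIB_API qualifier, it was not exported from shared/DLL builds; this hunk promotes it to the public interface. A hedged usage sketch (frame_header_size is a hypothetical helper, not part of the library):

    #include <stdio.h>
    #include <lz4frame.h>

    /* Given >= LZ4F_HEADER_SIZE_MIN bytes from the start of a frame,
     * return the exact header size, or 0 on error. */
    static size_t frame_header_size(const void* src, size_t srcSize)
    {
        size_t const hs = LZ4F_headerSize(src, srcSize);
        if (LZ4F_isError(hs)) {
            fprintf(stderr, "bad frame: %s\n", LZ4F_getErrorName(hs));
            return 0;
        }
        return hs;  /* within [LZ4F_HEADER_SIZE_MIN, LZ4F_HEADER_SIZE_MAX] */
    }
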
@@ -426,8 +432,10 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                                      const void* srcBuffer, size_t* srcSizePtr);

 /*! LZ4F_decompress() :
- *  Call this function repetitively to regenerate compressed data from `srcBuffer`.
- *  The function will read up to *srcSizePtr bytes from srcBuffer,
+ *  Call this function repetitively to regenerate data compressed in `srcBuffer`.
+ *
+ *  The function requires a valid dctx state.
+ *  It will read up to *srcSizePtr bytes from srcBuffer,
  *  and decompress data into dstBuffer, of capacity *dstSizePtr.
  *
  *  The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
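
Note: the reworded LZ4F_decompress() contract describes the usual streaming loop: feed input, drain output, stop when the function returns 0 (frame complete). A minimal sketch, assuming a dctx from LZ4F_createDecompressionContext() and a hypothetical sink() consumer for decoded bytes:

    #include <stddef.h>
    #include <lz4frame.h>

    static int decompress_all(LZ4F_dctx* dctx,
                              const char* in, size_t inLen,
                              char* out, size_t outCap,
                              void (*sink)(const char*, size_t))
    {
        size_t ret = 1;
        size_t inPos = 0;
        while (ret != 0 && inPos < inLen) {
            size_t srcSize = inLen - inPos;  /* in: available, out: consumed */
            size_t dstSize = outCap;         /* in: capacity,  out: produced */
            ret = LZ4F_decompress(dctx, out, &dstSize, in + inPos, &srcSize, NULL);
            if (LZ4F_isError(ret)) return -1;  /* dctx must be reset or freed after an error */
            inPos += srcSize;
            sink(out, dstSize);
        }
        return 0;
    }
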
@@ -493,9 +501,9 @@ extern "C" {
  *  Use at your own risk.
  */
 #ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-#define LZ4FLIB_STATIC_API LZ4FLIB_API
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
 #else
-#define LZ4FLIB_STATIC_API
+# define LZ4FLIB_STATIC_API
 #endif


data/contrib/lz4/lib/lz4hc.c
@@ -53,7 +53,7 @@
 #include "lz4hc.h"


-/*===   Common LZ4 definitions   ===*/
+/*===   Common definitions   ===*/
 #if defined(__GNUC__)
 #  pragma GCC diagnostic ignored "-Wunused-function"
 #endif
@@ -61,15 +61,16 @@
 #  pragma clang diagnostic ignored "-Wunused-function"
 #endif

-/*===   Enums   ===*/
-typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
-
-
 #define LZ4_COMMONDEFS_ONLY
 #ifndef LZ4_SRC_INCLUDED
 #include "lz4.c"   /* LZ4_count, constants, mem */
 #endif

+
+/*===   Enums   ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
 /*===   Constants   ===*/
 #define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
 #define LZ4_OPT_NUM   (1<<12)
@@ -92,7 +93,7 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr
 **************************************/
 static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
 {
-    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
+    MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
     MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
 }

@@ -161,8 +162,7 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
 static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
 {
     size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
-    if (bitsToRotate == 0)
-        return pattern;
+    if (bitsToRotate == 0) return pattern;
     return LZ4HC_rotl32(pattern, (int)bitsToRotate);
 }

@@ -172,7 +172,8 @@ static unsigned
 LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
 {
     const BYTE* const iStart = ip;
-    reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
+    reg_t const pattern = (sizeof(pattern)==8) ?
+        (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;

     while (likely(ip < iEnd-(sizeof(pattern)-1))) {
         reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
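
Note: both branches of the ternary are type-checked even though only one is live, so a literal `<< 32` provokes shift-count warnings on 32-bit targets where reg_t is 4 bytes wide; `sizeof(pattern)*4` still evaluates to 32 on 64-bit targets while staying formally in range everywhere. A standalone sketch of the same replication trick, using uintptr_t in place of the library's reg_t:

    #include <stdint.h>

    /* Replicate a 32-bit pattern across a native word (4 or 8 bytes). */
    static uintptr_t replicate32(uint32_t p32)
    {
        return (sizeof(uintptr_t) == 8)
             ? (uintptr_t)p32 + (((uintptr_t)p32) << (sizeof(uintptr_t) * 4))
             : (uintptr_t)p32;
    }
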
@@ -270,7 +271,7 @@ LZ4HC_InsertAndGetWiderMatch (
     DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
                 matchIndex, lowestMatchIndex);

-    while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) {
+    while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
         int matchLength=0;
         nbAttempts--;
         assert(matchIndex < ipIndex);
@@ -389,8 +390,8 @@ LZ4HC_InsertAndGetWiderMatch (
                 if (lookBackLength==0) {  /* no back possible */
                     size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
                     if ((size_t)longest < maxML) {
-                        assert(base + matchIndex < ip);
-                        if (ip - (base+matchIndex) > LZ4_DISTANCE_MAX) break;
+                        assert(base + matchIndex != ip);
+                        if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
                         assert(maxML < 2 GB);
                         longest = (int)maxML;
                         *matchpos = base + matchIndex;   /* virtual pos, relative to ip, to retrieve offset */
@@ -410,7 +411,7 @@ LZ4HC_InsertAndGetWiderMatch (
     }  /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */

     if ( dict == usingDictCtxHc
-      && nbAttempts
+      && nbAttempts > 0
       && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
         size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
         U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
@@ -460,74 +461,90 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,   /* Index tabl
  * @return : 0 if ok,
  *           1 if buffer issue detected */
 LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
-    const BYTE** ip,
-    BYTE** op,
-    const BYTE** anchor,
+    const BYTE** _ip,
+    BYTE** _op,
+    const BYTE** _anchor,
     int matchLength,
     const BYTE* const match,
     limitedOutput_directive limit,
     BYTE* oend)
 {
+#define ip      (*_ip)
+#define op      (*_op)
+#define anchor  (*_anchor)
+
     size_t length;
-    BYTE* const token = (*op)++;
+    BYTE* const token = op++;

 #if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
     static const BYTE* start = NULL;
     static U32 totalCost = 0;
-    U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
-    U32 const ll = (U32)(*ip - *anchor);
+    U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+    U32 const ll = (U32)(ip - anchor);
     U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
     U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
     U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
-    if (start==NULL) start = *anchor;  /* only works for single segment */
+    if (start==NULL) start = anchor;  /* only works for single segment */
     /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
-    DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
+    DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
                 pos,
-                (U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
+                (U32)(ip - anchor), matchLength, (U32)(ip-match),
                 cost, totalCost);
     totalCost += cost;
 #endif

     /* Encode Literal length */
-    length = (size_t)(*ip - *anchor);
-    if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1;   /* Check output limit */
+    length = (size_t)(ip - anchor);
+    LZ4_STATIC_ASSERT(notLimited == 0);
+    /* Check output limit */
+    if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+        DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+                (int)length, (int)(oend - op));
+        return 1;
+    }
     if (length >= RUN_MASK) {
         size_t len = length - RUN_MASK;
         *token = (RUN_MASK << ML_BITS);
-        for(; len >= 255 ; len -= 255) *(*op)++ = 255;
-        *(*op)++ = (BYTE)len;
+        for(; len >= 255 ; len -= 255) *op++ = 255;
+        *op++ = (BYTE)len;
     } else {
         *token = (BYTE)(length << ML_BITS);
     }

     /* Copy Literals */
-    LZ4_wildCopy8(*op, *anchor, (*op) + length);
-    *op += length;
+    LZ4_wildCopy8(op, anchor, op + length);
+    op += length;

     /* Encode Offset */
-    assert( (*ip - match) <= LZ4_DISTANCE_MAX );   /* note : consider providing offset as a value, rather than as a pointer difference */
-    LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
+    assert( (ip - match) <= LZ4_DISTANCE_MAX );   /* note : consider providing offset as a value, rather than as a pointer difference */
+    LZ4_writeLE16(op, (U16)(ip - match)); op += 2;

     /* Encode MatchLength */
     assert(matchLength >= MINMATCH);
     length = (size_t)matchLength - MINMATCH;
-    if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1;   /* Check output limit */
+    if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+        DEBUGLOG(6, "Not enough room to write match length");
+        return 1;   /* Check output limit */
+    }
     if (length >= ML_MASK) {
         *token += ML_MASK;
         length -= ML_MASK;
-        for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
-        if (length >= 255) { length -= 255; *(*op)++ = 255; }
-        *(*op)++ = (BYTE)length;
+        for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+        if (length >= 255) { length -= 255; *op++ = 255; }
+        *op++ = (BYTE)length;
     } else {
         *token += (BYTE)(length);
     }

     /* Prepare next loop */
-    *ip += matchLength;
-    *anchor = *ip;
+    ip += matchLength;
+    anchor = ip;

     return 0;
 }
+#undef ip
+#undef op
+#undef anchor

 LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
     LZ4HC_CCtx_internal* const ctx,
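
Note: LZ4HC_encodeSequence() keeps its BYTE** calling convention, but the body now reads the cursors through function-local `#define ip (*_ip)` style macros, eliminating the noisy `*(*op)++` double-dereferences; the #undef lines directly after the closing brace keep the macros from leaking into the rest of the file. A minimal sketch of the same dereference-once idiom, on a hypothetical emit() helper:

    #include <stddef.h>

    /* Caller passes &cursor; the body reads/writes `dst` as if it were local. */
    static void emit(unsigned char** _dst, const unsigned char* src, size_t n)
    {
    #define dst (*_dst)
        size_t i;
        for (i = 0; i < n; i++) *dst++ = src[i];  /* advances the caller's cursor */
    #undef dst
    }
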
@@ -535,7 +552,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
     char* const dest,
     int* srcSizePtr,
     int const maxOutputSize,
-    unsigned maxNbAttempts,
+    int maxNbAttempts,
     const limitedOutput_directive limit,
     const dictCtx_directive dict
     )
@@ -565,7 +582,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
     /* init */
     *srcSizePtr = 0;
     if (limit == fillOutput) oend -= LASTLITERALS;        /* Hack for support LZ4 format restriction */
-    if (inputSize < LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */
+    if (inputSize < LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */

     /* Main Loop */
     while (ip <= mflimit) {
@@ -637,7 +654,11 @@ _Search3:
             if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
             ip = start2;
             optr = op;
-            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
+            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+                ml  = ml2;
+                ref = ref2;
+                goto _dest_overflow;
+            }
             continue;
         }

@@ -709,17 +730,18 @@ _Search3:
_last_literals:
     /* Encode Last Literals */
     {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
-        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-        size_t const totalSize = 1 + litLength + lastRunSize;
+        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+        size_t const totalSize = 1 + llAdd + lastRunSize;
         if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
         if (limit && (op + totalSize > oend)) {
-            if (limit == limitedOutput) return 0;  /* Check output limit */
+            if (limit == limitedOutput) return 0;
             /* adapt lastRunSize to fill 'dest' */
-            lastRunSize  = (size_t)(oend - op) - 1;
-            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-            lastRunSize -= litLength;
+            lastRunSize  = (size_t)(oend - op) - 1 /*token*/;
+            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+            lastRunSize -= llAdd;
         }
-        ip = anchor + lastRunSize;
+        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

         if (lastRunSize >= RUN_MASK) {
             size_t accumulator = lastRunSize - RUN_MASK;
@@ -739,9 +761,25 @@ _last_literals:

_dest_overflow:
     if (limit == fillOutput) {
+        /* Assumption : ip, anchor, ml and ref must be set correctly */
+        size_t const ll = (size_t)(ip - anchor);
+        size_t const ll_addbytes = (ll + 240) / 255;
+        size_t const ll_totalCost = 1 + ll_addbytes + ll;
+        BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+        DEBUGLOG(6, "Last sequence overflowing");
         op = optr;  /* restore correct out pointer */
+        if (op + ll_totalCost <= maxLitPos) {
+            /* ll validated; now adjust match length */
+            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+            assert(maxMlSize < INT_MAX); assert(ml >= 0);
+            if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+        }   }
         goto _last_literals;
     }
+    /* compression failed */
     return 0;
 }

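
Note on the new overflow accounting: a literal run of ll bytes costs 1 token byte, plus ll_addbytes = (ll + 240) / 255 length-extension bytes (0 for ll <= 14, 1 for 15 <= ll <= 269, and so on), plus the ll literals themselves; the following match still needs 2 offset bytes and the token before oend, hence maxLitPos = oend - 3. Worked example: ll = 300 gives ll_addbytes = 540/255 = 2, so ll_totalCost = 1 + 2 + 300 = 303 bytes must fit below maxLitPos before any match length can be budgeted.
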
@@ -752,7 +790,7 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
                                    int const nbSearches, size_t sufficient_len,
                                    const limitedOutput_directive limit, int const fullUpdate,
                                    const dictCtx_directive dict,
-                                   HCfavor_e favorDecSpeed);
+                                   const HCfavor_e favorDecSpeed);


 LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
@@ -769,7 +807,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
     typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
     typedef struct {
         lz4hc_strat_e strat;
-        U32 nbSearches;
+        int nbSearches;
         U32 targetLength;
     } cParams_t;
     static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
@@ -788,7 +826,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
         { lz4opt,16384,LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
     };

-    DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);
+    DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+                ctx, src, *srcSizePtr, limit);

     if (limit == fillOutput && dstCapacity < 1) return 0;   /* Impossible to store anything */
     if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;    /* Unsupported input size (too large or negative) */
@@ -808,7 +847,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
         assert(cParam.strat == lz4opt);
         result = LZ4HC_compress_optimal(ctx,
                             src, dst, srcSizePtr, dstCapacity,
-                            (int)cParam.nbSearches, cParam.targetLength, limit,
+                            cParam.nbSearches, cParam.targetLength, limit,
                             cLevel == LZ4HC_CLEVEL_MAX,   /* ultra mode */
                             dict, favor);
     }
@@ -881,27 +920,22 @@ LZ4HC_compress_generic (

 int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }

-#ifndef _MSC_VER  /* for some reason, Visual fails the aligment test on 32-bit x86 :
-                   * it reports an aligment of 8-bytes,
-                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
 static size_t LZ4_streamHC_t_alignment(void)
 {
-    struct { char c; LZ4_streamHC_t t; } t_a;
-    return sizeof(t_a) - sizeof(t_a.t);
-}
+#if LZ4_ALIGN_TEST
+    typedef struct { char c; LZ4_streamHC_t t; } t_a;
+    return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+    return 1;  /* effectively disabled */
 #endif
+}

 /* state is presumed correctly initialized,
  * in which case its size and alignment have already been validate */
 int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
 {
     LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
-#ifndef _MSC_VER  /* for some reason, Visual fails the aligment test on 32-bit x86 :
-                   * it reports an aligment of 8-bytes,
-                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
-    assert(((size_t)state & (LZ4_streamHC_t_alignment() - 1)) == 0);  /* check alignment */
-#endif
-    if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0;  /* Error : state is not aligned for pointers (32 or 64 bits) */
+    if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
     LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
     LZ4HC_init_internal (ctx, (const BYTE*)src);
     if (dstCapacity < LZ4_compressBound(srcSize))
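
Note: the `struct { char c; LZ4_streamHC_t t; }` probe is the classic pre-C11 way to measure alignment: the padding inserted after `c` equals the alignment requirement of t. Per the removed comment, MSVC on 32-bit x86 reported 8 while actually aligning the type on 4 bytes, which the old #ifndef _MSC_VER exclusions worked around; the new LZ4_ALIGN_TEST switch can instead disable the check outright (`return 1` makes every address pass LZ4_isAligned). A self-contained sketch of the probe, with a hypothetical type T:

    #include <stdio.h>

    typedef struct { double d; char c; } T;

    /* Padding after `c` makes the wrapper exceed sizeof(T) by exactly alignof(T). */
    static size_t align_of_T(void)
    {
        struct probe { char c; T t; };
        return sizeof(struct probe) - sizeof(T);
    }

    int main(void)
    {
        printf("alignment of T: %u\n", (unsigned)align_of_T());
        return 0;
    }
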
@@ -950,10 +984,11 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
 /* allocation */
 LZ4_streamHC_t* LZ4_createStreamHC(void)
 {
-    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
-    if (LZ4_streamHCPtr==NULL) return NULL;
-    LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));  /* full initialization, malloc'ed buffer can be full of garbage */
-    return LZ4_streamHCPtr;
+    LZ4_streamHC_t* const state =
+        (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+    if (state == NULL) return NULL;
+    LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+    return state;
 }

 int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
@@ -968,22 +1003,16 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
 LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
 {
     LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
-    if (buffer == NULL) return NULL;
-    if (size < sizeof(LZ4_streamHC_t)) return NULL;
-#ifndef _MSC_VER  /* for some reason, Visual fails the aligment test on 32-bit x86 :
-                   * it reports an aligment of 8-bytes,
-                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
-    if (((size_t)buffer) & (LZ4_streamHC_t_alignment() - 1)) return NULL;  /* alignment check */
-#endif
     /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
     LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
-    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", LZ4_streamHCPtr, (unsigned)size);
-    /* end-base will trigger a clearTable on starting compression */
-    LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
-    LZ4_streamHCPtr->internal_donotuse.base = NULL;
-    LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
-    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
-    LZ4_streamHCPtr->internal_donotuse.dirty = 0;
+    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+    /* check conditions */
+    if (buffer == NULL) return NULL;
+    if (size < sizeof(LZ4_streamHC_t)) return NULL;
+    if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+    /* init */
+    {   LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+        MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
     LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
     return LZ4_streamHCPtr;
 }
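
Note: LZ4_initStreamHC() now zeroes the whole internal context with MEM_INIT instead of hand-initializing selected fields (the old `end = (const BYTE*)(ptrdiff_t)-1` clearTable trigger), and all precondition checks run before the buffer is touched. Caller-side usage is unchanged; a minimal sketch with stack-allocated state:

    #include <lz4hc.h>

    static int compress_once_hc(const char* src, int srcSize, char* dst, int dstCap)
    {
        LZ4_streamHC_t state;  /* correctly sized and aligned by construction */
        if (LZ4_initStreamHC(&state, sizeof(state)) == NULL) return -1;
        return LZ4_compress_HC_continue(&state, src, dst, srcSize, dstCap);
    }
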
@@ -1028,7 +1057,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
               const char* dictionary, int dictSize)
 {
     LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
+    DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
     assert(LZ4_streamHCPtr != NULL);
     if (dictSize > 64 KB) {
         dictionary += (size_t)dictSize - 64 KB;
@@ -1069,14 +1098,15 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
         ctxPtr->dictCtx = NULL;
     }

-static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
-                                            const char* src, char* dst,
-                                            int* srcSizePtr, int dstCapacity,
-                                            limitedOutput_directive limit)
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+                                 const char* src, char* dst,
+                                 int* srcSizePtr, int dstCapacity,
+                                 limitedOutput_directive limit)
 {
     LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-    DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
-                LZ4_streamHCPtr, src, *srcSizePtr);
+    DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+                LZ4_streamHCPtr, src, *srcSizePtr, limit);
     assert(ctxPtr != NULL);
     /* auto-init if forgotten */
     if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
@@ -1100,8 +1130,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
             if (sourceEnd > dictEnd) sourceEnd = dictEnd;
             ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
             if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
-        }
-    }
+    }   }

     return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
 }
@@ -1121,23 +1150,30 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch



-/* dictionary saving */
-
+/*  LZ4_saveDictHC :
+ *  save history content
+ *  into a user-provided buffer
+ *  which is then used to continue compression
+ */
 int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
 {
     LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
     int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
-    DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+    DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+    assert(prefixSize >= 0);
     if (dictSize > 64 KB) dictSize = 64 KB;
     if (dictSize < 4) dictSize = 0;
     if (dictSize > prefixSize) dictSize = prefixSize;
-    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+    if (safeBuffer == NULL) assert(dictSize == 0);
+    if (dictSize > 0)
+        memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
     {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
         streamPtr->end = (const BYTE*)safeBuffer + dictSize;
         streamPtr->base = streamPtr->end - endIndex;
         streamPtr->dictLimit = endIndex - (U32)dictSize;
         streamPtr->lowLimit = endIndex - (U32)dictSize;
-        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
+        if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+            streamPtr->nextToUpdate = streamPtr->dictLimit;
     }
     return dictSize;
 }
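
Note: LZ4_saveDictHC() now tolerates safeBuffer == NULL (legal only when the resulting dictSize is 0) and skips the memmove for empty dictionaries. It backs the usual double-buffer streaming pattern; a hedged sketch, assuming a caller-managed 64 KB scratch area:

    #include <lz4hc.h>

    /* Compress one block, then preserve up to 64 KB of history in dictBuf
     * so the next block can still reference it after `src` is reused. */
    static int compress_block_hc(LZ4_streamHC_t* stream,
                                 const char* src, int srcSize,
                                 char* dst, int dstCap,
                                 char dictBuf[64 * 1024])
    {
        int const written = LZ4_compress_HC_continue(stream, src, dst, srcSize, dstCap);
        if (written > 0)
            (void)LZ4_saveDictHC(stream, dictBuf, 64 * 1024);
        return written;
    }
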
@@ -1287,8 +1323,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                                     const dictCtx_directive dict,
                                     const HCfavor_e favorDecSpeed)
 {
+    int retval = 0;
 #define TRAILING_LITERALS 3
+#ifdef LZ4HC_HEAPMODE
+    LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
     LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which is a bit large for stack... */
+#endif

     const BYTE* ip = (const BYTE*) source;
     const BYTE* anchor = ip;
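
Note: LZ4HC_HEAPMODE is a compile-time switch (for example `cc -DLZ4HC_HEAPMODE=1 -c lz4hc.c`, assuming the standard build); when set, the ~64 KB `opt` table is taken from ALLOC()/FREEMEM() (the library's malloc/free wrappers) instead of the stack. That suits small-stack environments at the cost of a possible allocation failure, which the new `_return_label` exit path in the following hunks handles.
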
@@ -1298,15 +1339,19 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
     BYTE* op = (BYTE*) dst;
     BYTE* opSaved = (BYTE*) dst;
     BYTE* oend = op + dstCapacity;
+    int ovml = MINMATCH;  /* overflow - last sequence */
+    const BYTE* ovref = NULL;

     /* init */
+#ifdef LZ4HC_HEAPMODE
+    if (opt == NULL) goto _return_label;
+#endif
     DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
     *srcSizePtr = 0;
     if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack for support LZ4 format restriction */
     if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;

     /* Main Loop */
-    assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
     while (ip <= mflimit) {
         int const llen = (int)(ip - anchor);
         int best_mlen, best_off;
@@ -1320,8 +1365,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
             int const firstML = firstMatch.len;
             const BYTE* const matchPos = ip - firstMatch.off;
             opSaved = op;
-            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) )   /* updates ip, op and anchor */
+            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) {  /* updates ip, op and anchor */
+                ovml = firstML;
+                ovref = matchPos;
                 goto _dest_overflow;
+            }
             continue;
         }

@@ -1463,7 +1511,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                 best_off = opt[last_match_pos].off;
                 cur = last_match_pos - best_mlen;

-encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+ encode: /* cur, last_match_pos, best_mlen, best_off must be set */
                 assert(cur < LZ4_OPT_NUM);
                 assert(last_match_pos >= 1);  /* == 1 when only one candidate */
                 DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
@@ -1493,25 +1541,31 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                 assert(ml >= MINMATCH);
                 assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
                 opSaved = op;
-                if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) )   /* updates ip, op and anchor */
+                if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) {  /* updates ip, op and anchor */
+                    ovml = ml;
+                    ovref = ip - offset;
                     goto _dest_overflow;
-        }   }
+            }   }   }
     }  /* while (ip <= mflimit) */

-_last_literals:
+ _last_literals:
     /* Encode Last Literals */
     {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
-        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-        size_t const totalSize = 1 + litLength + lastRunSize;
+        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+        size_t const totalSize = 1 + llAdd + lastRunSize;
         if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
         if (limit && (op + totalSize > oend)) {
-            if (limit == limitedOutput) return 0;  /* Check output limit */
+            if (limit == limitedOutput) {  /* Check output limit */
+                retval = 0;
+                goto _return_label;
+            }
             /* adapt lastRunSize to fill 'dst' */
-            lastRunSize  = (size_t)(oend - op) - 1;
-            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-            lastRunSize -= litLength;
+            lastRunSize  = (size_t)(oend - op) - 1 /*token*/;
+            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+            lastRunSize -= llAdd;
         }
-        ip = anchor + lastRunSize;
+        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

         if (lastRunSize >= RUN_MASK) {
             size_t accumulator = lastRunSize - RUN_MASK;
@@ -1527,12 +1581,35 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,

     /* End */
     *srcSizePtr = (int) (((const char*)ip) - source);
-    return (int) ((char*)op-dst);
+    retval = (int) ((char*)op-dst);
+    goto _return_label;

-_dest_overflow:
-    if (limit == fillOutput) {
-        op = opSaved;  /* restore correct out pointer */
-        goto _last_literals;
-    }
-    return 0;
-}
+ _dest_overflow:
+    if (limit == fillOutput) {
+        /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+        size_t const ll = (size_t)(ip - anchor);
+        size_t const ll_addbytes = (ll + 240) / 255;
+        size_t const ll_totalCost = 1 + ll_addbytes + ll;
+        BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+        DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+        op = opSaved;  /* restore correct out pointer */
+        if (op + ll_totalCost <= maxLitPos) {
+            /* ll validated; now adjust match length */
+            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+            assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+            if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+                DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+                DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+                DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+        }   }
+        goto _last_literals;
+    }
+ _return_label:
+#ifdef LZ4HC_HEAPMODE
+    FREEMEM(opt);
+#endif
+    return retval;
+}