isomorfeus-ferret 0.17.2 → 0.17.3

Files changed (126)
  1. checksums.yaml +4 -4
  2. data/ext/isomorfeus_ferret_ext/benchmark.c +9 -20
  3. data/ext/isomorfeus_ferret_ext/benchmarks_all.h +1 -2
  4. data/ext/isomorfeus_ferret_ext/bm_hash.c +1 -2
  5. data/ext/isomorfeus_ferret_ext/brotli_dec_decode.c +4 -2
  6. data/ext/isomorfeus_ferret_ext/brotli_enc_encode.c +3 -2
  7. data/ext/isomorfeus_ferret_ext/frb_analysis.c +4 -5
  8. data/ext/isomorfeus_ferret_ext/frb_field_info.c +3 -4
  9. data/ext/isomorfeus_ferret_ext/frb_index.c +118 -125
  10. data/ext/isomorfeus_ferret_ext/frb_lazy_doc.c +14 -16
  11. data/ext/isomorfeus_ferret_ext/frb_search.c +31 -23
  12. data/ext/isomorfeus_ferret_ext/frb_store.c +27 -13
  13. data/ext/isomorfeus_ferret_ext/frb_utils.c +3 -6
  14. data/ext/isomorfeus_ferret_ext/frt_analysis.c +39 -46
  15. data/ext/isomorfeus_ferret_ext/frt_analysis.h +9 -9
  16. data/ext/isomorfeus_ferret_ext/frt_array.c +11 -22
  17. data/ext/isomorfeus_ferret_ext/frt_bitvector.h +3 -6
  18. data/ext/isomorfeus_ferret_ext/frt_doc_field.c +87 -0
  19. data/ext/isomorfeus_ferret_ext/frt_doc_field.h +26 -0
  20. data/ext/isomorfeus_ferret_ext/frt_document.c +4 -97
  21. data/ext/isomorfeus_ferret_ext/frt_document.h +2 -27
  22. data/ext/isomorfeus_ferret_ext/frt_except.c +8 -6
  23. data/ext/isomorfeus_ferret_ext/frt_except.h +1 -2
  24. data/ext/isomorfeus_ferret_ext/frt_field_index.c +13 -32
  25. data/ext/isomorfeus_ferret_ext/frt_field_index.h +0 -6
  26. data/ext/isomorfeus_ferret_ext/frt_field_info.c +69 -0
  27. data/ext/isomorfeus_ferret_ext/frt_field_info.h +49 -0
  28. data/ext/isomorfeus_ferret_ext/frt_field_infos.c +196 -0
  29. data/ext/isomorfeus_ferret_ext/frt_field_infos.h +35 -0
  30. data/ext/isomorfeus_ferret_ext/frt_global.c +10 -4
  31. data/ext/isomorfeus_ferret_ext/frt_global.h +11 -15
  32. data/ext/isomorfeus_ferret_ext/frt_hash.c +8 -8
  33. data/ext/isomorfeus_ferret_ext/frt_hash.h +1 -2
  34. data/ext/isomorfeus_ferret_ext/frt_hashset.c +20 -40
  35. data/ext/isomorfeus_ferret_ext/frt_hashset.h +1 -2
  36. data/ext/isomorfeus_ferret_ext/frt_helper.c +7 -15
  37. data/ext/isomorfeus_ferret_ext/frt_in_stream.c +35 -45
  38. data/ext/isomorfeus_ferret_ext/frt_in_stream.h +3 -2
  39. data/ext/isomorfeus_ferret_ext/frt_ind.c +20 -38
  40. data/ext/isomorfeus_ferret_ext/frt_index.c +292 -790
  41. data/ext/isomorfeus_ferret_ext/frt_index.h +1 -102
  42. data/ext/isomorfeus_ferret_ext/frt_lang.c +5 -10
  43. data/ext/isomorfeus_ferret_ext/frt_lazy_doc.c +3 -3
  44. data/ext/isomorfeus_ferret_ext/frt_lazy_doc.h +1 -1
  45. data/ext/isomorfeus_ferret_ext/frt_lazy_doc_field.c +18 -25
  46. data/ext/isomorfeus_ferret_ext/frt_lazy_doc_field.h +5 -5
  47. data/ext/isomorfeus_ferret_ext/frt_mdbx_store.c +102 -70
  48. data/ext/isomorfeus_ferret_ext/frt_mempool.c +8 -16
  49. data/ext/isomorfeus_ferret_ext/frt_multimapper.c +23 -46
  50. data/ext/isomorfeus_ferret_ext/frt_multimapper.h +4 -8
  51. data/ext/isomorfeus_ferret_ext/frt_out_stream.c +31 -43
  52. data/ext/isomorfeus_ferret_ext/frt_out_stream.h +2 -2
  53. data/ext/isomorfeus_ferret_ext/frt_posh.c +6 -819
  54. data/ext/isomorfeus_ferret_ext/frt_posh.h +0 -57
  55. data/ext/isomorfeus_ferret_ext/frt_priorityqueue.c +11 -22
  56. data/ext/isomorfeus_ferret_ext/frt_priorityqueue.h +1 -2
  57. data/ext/isomorfeus_ferret_ext/frt_q_boolean.c +85 -171
  58. data/ext/isomorfeus_ferret_ext/frt_q_match_all.c +8 -16
  59. data/ext/isomorfeus_ferret_ext/frt_q_multi_term.c +1 -2
  60. data/ext/isomorfeus_ferret_ext/frt_q_parser.c +49 -98
  61. data/ext/isomorfeus_ferret_ext/frt_q_phrase.c +52 -104
  62. data/ext/isomorfeus_ferret_ext/frt_q_range.c +6 -12
  63. data/ext/isomorfeus_ferret_ext/frt_q_span.c +113 -226
  64. data/ext/isomorfeus_ferret_ext/frt_q_wildcard.c +1 -2
  65. data/ext/isomorfeus_ferret_ext/frt_ram_store.c +134 -85
  66. data/ext/isomorfeus_ferret_ext/frt_search.c +82 -164
  67. data/ext/isomorfeus_ferret_ext/frt_similarity.c +11 -22
  68. data/ext/isomorfeus_ferret_ext/frt_similarity.h +1 -2
  69. data/ext/isomorfeus_ferret_ext/frt_store.c +13 -25
  70. data/ext/isomorfeus_ferret_ext/frt_store.h +86 -52
  71. data/ext/isomorfeus_ferret_ext/frt_term_vectors.c +8 -16
  72. data/ext/isomorfeus_ferret_ext/frt_win32.h +5 -10
  73. data/ext/isomorfeus_ferret_ext/isomorfeus_ferret.c +12 -11
  74. data/ext/isomorfeus_ferret_ext/isomorfeus_ferret.h +11 -13
  75. data/ext/isomorfeus_ferret_ext/lz4.c +422 -195
  76. data/ext/isomorfeus_ferret_ext/lz4.h +114 -46
  77. data/ext/isomorfeus_ferret_ext/lz4frame.c +421 -242
  78. data/ext/isomorfeus_ferret_ext/lz4frame.h +122 -53
  79. data/ext/isomorfeus_ferret_ext/lz4hc.c +127 -111
  80. data/ext/isomorfeus_ferret_ext/lz4hc.h +14 -14
  81. data/ext/isomorfeus_ferret_ext/lz4xxhash.h +1 -1
  82. data/ext/isomorfeus_ferret_ext/mdbx.c +3762 -2526
  83. data/ext/isomorfeus_ferret_ext/mdbx.h +115 -70
  84. data/ext/isomorfeus_ferret_ext/test.c +40 -87
  85. data/ext/isomorfeus_ferret_ext/test.h +3 -6
  86. data/ext/isomorfeus_ferret_ext/test_1710.c +11 -13
  87. data/ext/isomorfeus_ferret_ext/test_analysis.c +32 -64
  88. data/ext/isomorfeus_ferret_ext/test_array.c +6 -12
  89. data/ext/isomorfeus_ferret_ext/test_bitvector.c +12 -24
  90. data/ext/isomorfeus_ferret_ext/test_document.c +23 -33
  91. data/ext/isomorfeus_ferret_ext/test_except.c +10 -21
  92. data/ext/isomorfeus_ferret_ext/test_fields.c +62 -68
  93. data/ext/isomorfeus_ferret_ext/test_file_deleter.c +15 -23
  94. data/ext/isomorfeus_ferret_ext/test_filter.c +17 -27
  95. data/ext/isomorfeus_ferret_ext/test_global.c +14 -29
  96. data/ext/isomorfeus_ferret_ext/test_hash.c +19 -38
  97. data/ext/isomorfeus_ferret_ext/test_hashset.c +8 -16
  98. data/ext/isomorfeus_ferret_ext/test_helper.c +4 -8
  99. data/ext/isomorfeus_ferret_ext/test_highlighter.c +16 -28
  100. data/ext/isomorfeus_ferret_ext/test_index.c +277 -487
  101. data/ext/isomorfeus_ferret_ext/test_lang.c +7 -14
  102. data/ext/isomorfeus_ferret_ext/test_mdbx_store.c +2 -5
  103. data/ext/isomorfeus_ferret_ext/test_mempool.c +5 -10
  104. data/ext/isomorfeus_ferret_ext/test_multimapper.c +3 -6
  105. data/ext/isomorfeus_ferret_ext/test_priorityqueue.c +9 -18
  106. data/ext/isomorfeus_ferret_ext/test_q_const_score.c +4 -6
  107. data/ext/isomorfeus_ferret_ext/test_q_filtered.c +3 -4
  108. data/ext/isomorfeus_ferret_ext/test_q_fuzzy.c +9 -15
  109. data/ext/isomorfeus_ferret_ext/test_q_parser.c +8 -16
  110. data/ext/isomorfeus_ferret_ext/test_q_span.c +19 -35
  111. data/ext/isomorfeus_ferret_ext/test_ram_store.c +14 -13
  112. data/ext/isomorfeus_ferret_ext/test_search.c +60 -109
  113. data/ext/isomorfeus_ferret_ext/test_segments.c +8 -13
  114. data/ext/isomorfeus_ferret_ext/test_similarity.c +2 -4
  115. data/ext/isomorfeus_ferret_ext/test_sort.c +14 -24
  116. data/ext/isomorfeus_ferret_ext/test_store.c +96 -115
  117. data/ext/isomorfeus_ferret_ext/test_term.c +9 -15
  118. data/ext/isomorfeus_ferret_ext/test_term_vectors.c +9 -14
  119. data/ext/isomorfeus_ferret_ext/test_test.c +4 -8
  120. data/ext/isomorfeus_ferret_ext/test_threading.c +14 -20
  121. data/ext/isomorfeus_ferret_ext/testhelper.c +11 -21
  122. data/ext/isomorfeus_ferret_ext/testhelper.h +1 -1
  123. data/ext/isomorfeus_ferret_ext/tests_all.h +1 -2
  124. data/lib/isomorfeus/ferret/index/index.rb +1 -1
  125. data/lib/isomorfeus/ferret/version.rb +1 -1
  126. metadata +24 -4
@@ -1,6 +1,6 @@
  /*
  LZ4 HC - High Compression Mode of LZ4
- Copyright (C) 2011-2017, Yann Collet.
+ Copyright (C) 2011-2020, Yann Collet.

  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

@@ -42,7 +42,7 @@
  * Select how default compression function will allocate workplace memory,
  * in stack (0:fastest), or in heap (1:requires malloc()).
  * Since workplace is rather large, heap mode is recommended.
- */
+ **/
  #ifndef LZ4HC_HEAPMODE
  # define LZ4HC_HEAPMODE 1
  #endif
@@ -99,18 +99,20 @@ static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)

  static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
  {
- uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
- if (startingOffset > 1 GB) {
+ size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
+ size_t newStartingOffset = bufferSize + hc4->dictLimit;
+ assert(newStartingOffset >= bufferSize); /* check overflow */
+ if (newStartingOffset > 1 GB) {
  LZ4HC_clearTables(hc4);
- startingOffset = 0;
+ newStartingOffset = 0;
  }
- startingOffset += 64 KB;
- hc4->nextToUpdate = (U32) startingOffset;
- hc4->base = start - startingOffset;
+ newStartingOffset += 64 KB;
+ hc4->nextToUpdate = (U32)newStartingOffset;
+ hc4->prefixStart = start;
  hc4->end = start;
- hc4->dictBase = start - startingOffset;
- hc4->dictLimit = (U32) startingOffset;
- hc4->lowLimit = (U32) startingOffset;
+ hc4->dictStart = start;
+ hc4->dictLimit = (U32)newStartingOffset;
+ hc4->lowLimit = (U32)newStartingOffset;
  }


@@ -119,12 +121,15 @@ LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
  {
  U16* const chainTable = hc4->chainTable;
  U32* const hashTable = hc4->hashTable;
- const BYTE* const base = hc4->base;
- U32 const target = (U32)(ip - base);
+ const BYTE* const prefixPtr = hc4->prefixStart;
+ U32 const prefixIdx = hc4->dictLimit;
+ U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
  U32 idx = hc4->nextToUpdate;
+ assert(ip >= prefixPtr);
+ assert(target >= prefixIdx);

  while (idx < target) {
- U32 const h = LZ4HC_hashPtr(base+idx);
+ U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
  size_t delta = idx - hashTable[h];
  if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
  DELTANEXTU16(chainTable, idx) = (U16)delta;
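
Note on the hunks above: this upstream LZ4 refactor drops the virtual `base`/`dictBase` pointers and anchors all match indexes to `prefixStart`/`dictStart` together with the `dictLimit`/`lowLimit` offsets, which is where the recurring `prefixPtr + idx - prefixIdx` arithmetic comes from. A minimal illustrative helper (not part of the gem or of upstream LZ4; `hc_index_to_ptr` is an assumed name) showing the index-to-pointer mapping these hunks implement:

    #include "lz4hc.h"   /* brings in LZ4HC_CCtx_internal, LZ4_byte, LZ4_u32 */

    /* Illustration only: recover a pointer from a match index under the new
     * prefixStart/dictStart scheme used throughout this diff. */
    static const LZ4_byte* hc_index_to_ptr(const LZ4HC_CCtx_internal* hc4, LZ4_u32 idx)
    {
        if (idx >= hc4->dictLimit)                          /* index lies in the current prefix */
            return hc4->prefixStart + (idx - hc4->dictLimit);
        return hc4->dictStart + (idx - hc4->lowLimit);      /* index lies in the external dictionary */
    }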
@@ -193,15 +198,14 @@ LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
  BYTE const byte = (BYTE)(pattern >> bitOffset);
  if (*ip != byte) break;
  ip ++; bitOffset -= 8;
- }
- }
+ } }

  return (unsigned)(ip - iStart);
  }

  /* LZ4HC_reverseCountPattern() :
  * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
- * read using natural platform endianess */
+ * read using natural platform endianness */
  static unsigned
  LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
  {
@@ -211,7 +215,7 @@ LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
  if (LZ4_read32(ip-4) != pattern) break;
  ip -= 4;
  }
- { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
+ { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
  while (likely(ip>iLow)) {
  if (ip[-1] != *bytePtr) break;
  ip--; bytePtr--;
@@ -234,28 +238,28 @@ typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;

  LZ4_FORCE_INLINE int
  LZ4HC_InsertAndGetWiderMatch (
- LZ4HC_CCtx_internal* hc4,
- const BYTE* const ip,
- const BYTE* const iLowLimit,
- const BYTE* const iHighLimit,
- int longest,
- const BYTE** matchpos,
- const BYTE** startpos,
- const int maxNbAttempts,
- const int patternAnalysis,
- const int chainSwap,
- const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed)
+ LZ4HC_CCtx_internal* const hc4,
+ const BYTE* const ip,
+ const BYTE* const iLowLimit, const BYTE* const iHighLimit,
+ int longest,
+ const BYTE** matchpos,
+ const BYTE** startpos,
+ const int maxNbAttempts,
+ const int patternAnalysis, const int chainSwap,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
  {
  U16* const chainTable = hc4->chainTable;
  U32* const HashTable = hc4->hashTable;
  const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
- const BYTE* const base = hc4->base;
- const U32 dictLimit = hc4->dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const U32 ipIndex = (U32)(ip - base);
- const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
- const BYTE* const dictBase = hc4->dictBase;
+ const BYTE* const prefixPtr = hc4->prefixStart;
+ const U32 prefixIdx = hc4->dictLimit;
+ const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
+ const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex);
+ const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
+ const BYTE* const dictStart = hc4->dictStart;
+ const U32 dictIdx = hc4->lowLimit;
+ const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
  int const lookBackLength = (int)(ip-iLowLimit);
  int nbAttempts = maxNbAttempts;
  U32 matchChainPos = 0;
@@ -277,14 +281,13 @@ LZ4HC_InsertAndGetWiderMatch (
  assert(matchIndex < ipIndex);
  if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
  /* do nothing */
- } else if (matchIndex >= dictLimit) { /* within current Prefix */
- const BYTE* const matchPtr = base + matchIndex;
- assert(matchPtr >= lowPrefixPtr);
+ } else if (matchIndex >= prefixIdx) { /* within current Prefix */
+ const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx;
  assert(matchPtr < ip);
  assert(longest >= 1);
  if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
  if (LZ4_read32(matchPtr) == pattern) {
- int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
+ int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
  matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
  matchLength -= back;
  if (matchLength > longest) {
@@ -293,24 +296,25 @@ LZ4HC_InsertAndGetWiderMatch (
  *startpos = ip + back;
  } } }
  } else { /* lowestMatchIndex <= matchIndex < dictLimit */
- const BYTE* const matchPtr = dictBase + matchIndex;
- if (LZ4_read32(matchPtr) == pattern) {
- const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
+ assert(matchIndex >= dictIdx);
+ if ( likely(matchIndex <= prefixIdx - 4)
+ && (LZ4_read32(matchPtr) == pattern) ) {
  int back = 0;
- const BYTE* vLimit = ip + (dictLimit - matchIndex);
+ const BYTE* vLimit = ip + (prefixIdx - matchIndex);
  if (vLimit > iHighLimit) vLimit = iHighLimit;
  matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
  if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
- matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
+ matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit);
  back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
  matchLength -= back;
  if (matchLength > longest) {
  longest = matchLength;
- *matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
+ *matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
  *startpos = ip + back;
  } } }

- if (chainSwap && matchLength==longest) { /* better match => select a better chain */
+ if (chainSwap && matchLength==longest) { /* better match => select a better chain */
  assert(lookBackLength==0); /* search forward only */
  if (matchIndex + (U32)longest <= ipIndex) {
  int const kTrigger = 4;
@@ -326,8 +330,7 @@ LZ4HC_InsertAndGetWiderMatch (
  distanceToNextMatch = candidateDist;
  matchChainPos = (U32)pos;
  accel = 1 << kTrigger;
- }
- }
+ } }
  if (distanceToNextMatch > 1) {
  if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
  matchIndex -= distanceToNextMatch;
@@ -347,23 +350,24 @@ LZ4HC_InsertAndGetWiderMatch (
  repeat = rep_not;
  } }
  if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
- && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
- const int extDict = matchCandidateIdx < dictLimit;
- const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
+ && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
+ const int extDict = matchCandidateIdx < prefixIdx;
+ const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx;
  if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
- const BYTE* const dictStart = dictBase + hc4->lowLimit;
- const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
+ const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
  size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
  if (extDict && matchPtr + forwardPatternLength == iLimit) {
  U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
- forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
+ forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
  }
- { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
+ { const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
  size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
  size_t currentSegmentLength;
- if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
+ if (!extDict
+ && matchPtr - backLength == prefixPtr
+ && dictIdx < prefixIdx) {
  U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
- backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
+ backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
  }
  /* Limit backLength not go further than lowestMatchIndex */
  backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
@@ -373,28 +377,28 @@ LZ4HC_InsertAndGetWiderMatch (
  if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
  && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
  U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
- if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
+ if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
  matchIndex = newMatchIndex;
  else {
  /* Can only happen if started in the prefix */
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
+ assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
+ matchIndex = prefixIdx;
  }
  } else {
  U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
- if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
+ if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
+ assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
+ matchIndex = prefixIdx;
  } else {
  matchIndex = newMatchIndex;
  if (lookBackLength==0) { /* no back possible */
  size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
  if ((size_t)longest < maxML) {
- assert(base + matchIndex != ip);
- if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
+ assert(prefixPtr - prefixIdx + matchIndex != ip);
+ if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
  assert(maxML < 2 GB);
  longest = (int)maxML;
- *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
+ *matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
  *startpos = ip;
  }
  { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
@@ -413,12 +417,12 @@ LZ4HC_InsertAndGetWiderMatch (
  if ( dict == usingDictCtxHc
  && nbAttempts > 0
  && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
- size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
+ size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
  U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
  assert(dictEndOffset <= 1 GB);
  matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
  while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
- const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
+ const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;

  if (LZ4_read32(matchPtr) == pattern) {
  int mlt;
@@ -426,11 +430,11 @@ LZ4HC_InsertAndGetWiderMatch (
  const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
  if (vLimit > iHighLimit) vLimit = iHighLimit;
  mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
- back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
  mlt -= back;
  if (mlt > longest) {
  longest = mlt;
- *matchpos = base + matchIndex + back;
+ *matchpos = prefixPtr - prefixIdx + matchIndex + back;
  *startpos = ip + back;
  } }

@@ -442,13 +446,13 @@ LZ4HC_InsertAndGetWiderMatch (
  return longest;
  }

- LZ4_FORCE_INLINE
- int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
- const BYTE* const ip, const BYTE* const iLimit,
- const BYTE** matchpos,
- const int maxNbAttempts,
- const int patternAnalysis,
- const dictCtx_directive dict)
+ LZ4_FORCE_INLINE int
+ LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE** matchpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const dictCtx_directive dict)
  {
  const BYTE* uselessPtr = ip;
  /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
@@ -751,7 +755,7 @@ _last_literals:
  } else {
  *op++ = (BYTE)(lastRunSize << ML_BITS);
  }
- memcpy(op, anchor, lastRunSize);
+ LZ4_memcpy(op, anchor, lastRunSize);
  op += lastRunSize;
  }

@@ -884,13 +888,13 @@ LZ4HC_compress_generic_dictCtx (
  limitedOutput_directive limit
  )
  {
- const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
+ const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
  assert(ctx->dictCtx != NULL);
  if (position >= 64 KB) {
  ctx->dictCtx = NULL;
  return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
  } else if (position == 0 && *srcSizePtr > 4 KB) {
- memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
  LZ4HC_setExternalDict(ctx, (const BYTE *)src);
  ctx->compressionLevel = (short)cLevel;
  return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
@@ -953,13 +957,15 @@ int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int src

  int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
  {
+ int cSize;
  #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
  LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
+ if (statePtr==NULL) return 0;
  #else
  LZ4_streamHC_t state;
  LZ4_streamHC_t* const statePtr = &state;
  #endif
- int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
+ cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
  #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
  FREEMEM(statePtr);
  #endif
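
With the hunk above, heap-mode `LZ4_compress_HC` now returns 0 when its temporary state allocation fails, so callers must treat a 0 return as an error. A minimal caller sketch (illustrative only; `compress_block` and the capacity check are assumptions, not gem code):

    #include "lz4.h"     /* LZ4_compressBound */
    #include "lz4hc.h"   /* LZ4_compress_HC, LZ4HC_CLEVEL_DEFAULT */

    /* Returns the compressed size, or 0 on failure (bad args, dst too small,
     * or, with LZ4HC_HEAPMODE==1, a failed state allocation). */
    static int compress_block(const char* src, int srcSize, char* dst, int dstCapacity)
    {
        if (dstCapacity < LZ4_compressBound(srcSize)) return 0;  /* keep the example simple */
        return LZ4_compress_HC(src, dst, srcSize, dstCapacity, LZ4HC_CLEVEL_DEFAULT);
    }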
@@ -982,6 +988,7 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
  * Streaming Functions
  **************************************/
  /* allocation */
+ #if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
  LZ4_streamHC_t* LZ4_createStreamHC(void)
  {
  LZ4_streamHC_t* const state =
@@ -998,13 +1005,12 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
  FREEMEM(LZ4_streamHCPtr);
  return 0;
  }
+ #endif


  LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
  {
  LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
- /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
- LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
  DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
  /* check conditions */
  if (buffer == NULL) return NULL;
@@ -1030,9 +1036,13 @@ void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLev
  if (LZ4_streamHCPtr->internal_donotuse.dirty) {
  LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
  } else {
- /* preserve end - base : can trigger clearTable's threshold */
- LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
- LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ /* preserve end - prefixStart : can trigger clearTable's threshold */
+ if (LZ4_streamHCPtr->internal_donotuse.end != NULL) {
+ LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.prefixStart;
+ } else {
+ assert(LZ4_streamHCPtr->internal_donotuse.prefixStart == NULL);
+ }
+ LZ4_streamHCPtr->internal_donotuse.prefixStart = NULL;
  LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
  }
  LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
@@ -1083,14 +1093,14 @@ void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC
  static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
  {
  DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
- if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
+ if (ctxPtr->end >= ctxPtr->prefixStart + 4)
  LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */

  /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
  ctxPtr->lowLimit = ctxPtr->dictLimit;
- ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
- ctxPtr->dictBase = ctxPtr->base;
- ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
+ ctxPtr->prefixStart = newBlock;
  ctxPtr->end = newBlock;
  ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */

@@ -1109,11 +1119,11 @@ LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
  LZ4_streamHCPtr, src, *srcSizePtr, limit);
  assert(ctxPtr != NULL);
  /* auto-init if forgotten */
- if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
+ if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);

  /* Check overflow */
- if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
- size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
+ if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
  if (dictSize > 64 KB) dictSize = 64 KB;
  LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
  }
@@ -1124,13 +1134,16 @@ LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,

  /* Check overlapping input/dictionary space */
  { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
- const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
- const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+ const BYTE* const dictBegin = ctxPtr->dictStart;
+ const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
  if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
  if (sourceEnd > dictEnd) sourceEnd = dictEnd;
- ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
- if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
- } }
+ ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
+ ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) {
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ } } }

  return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
  }
@@ -1158,7 +1171,7 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch
  int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
  {
  LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
- int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
+ int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
  DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
  assert(prefixSize >= 0);
  if (dictSize > 64 KB) dictSize = 64 KB;
@@ -1166,12 +1179,13 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS
  if (dictSize > prefixSize) dictSize = prefixSize;
  if (safeBuffer == NULL) assert(dictSize == 0);
  if (dictSize > 0)
- memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
- { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+ LZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
  streamPtr->end = (const BYTE*)safeBuffer + dictSize;
- streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->prefixStart = streamPtr->end - dictSize;
  streamPtr->dictLimit = endIndex - (U32)dictSize;
  streamPtr->lowLimit = endIndex - (U32)dictSize;
+ streamPtr->dictStart = streamPtr->prefixStart;
  if (streamPtr->nextToUpdate < streamPtr->dictLimit)
  streamPtr->nextToUpdate = streamPtr->dictLimit;
  }
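
The streaming entry points touched above keep their public behaviour. A compact sketch of the block-streaming loop they serve (illustrative only; `compress_blocks`, the 64 KB `dictBuf`, and the omission of per-block size framing are assumptions, not gem code):

    #include "lz4hc.h"

    /* Illustrative streaming loop: compress nbBlocks dependent blocks,
     * carrying up to 64 KB of history between them. */
    static int compress_blocks(const char* const* inBlocks, const int* inSizes, int nbBlocks,
                               char* dst, int dstCapacity)
    {
        LZ4_streamHC_t ctx;               /* ~262 KB of state; large for some stacks */
        char dictBuf[64 * 1024];          /* history survives here between blocks */
        int total = 0, i;
        if (LZ4_initStreamHC(&ctx, sizeof(ctx)) == NULL) return -1;   /* mandatory init */
        for (i = 0; i < nbBlocks; i++) {
            int const cSize = LZ4_compress_HC_continue(&ctx, inBlocks[i], dst + total,
                                                       inSizes[i], dstCapacity - total);
            if (cSize <= 0) return -1;    /* dst full or bad input */
            total += cSize;
            LZ4_saveDictHC(&ctx, dictBuf, (int)sizeof(dictBuf));  /* input buffer may now be reused */
        }
        return total;
    }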
@@ -1199,7 +1213,7 @@ int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src,


  /* Deprecated streaming functions */
- int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
+ int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }

  /* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
  * @return : 0 on success, !=0 if error */
@@ -1211,6 +1225,7 @@ int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
  return 0;
  }

+ #if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
  void* LZ4_createHC (const char* inputBuffer)
  {
  LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
@@ -1225,6 +1240,7 @@ int LZ4_freeHC (void* LZ4HC_Data)
  FREEMEM(LZ4HC_Data);
  return 0;
  }
+ #endif

  int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
  {
@@ -1238,11 +1254,11 @@ int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, c

  char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
  {
- LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
- const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
+ LZ4_streamHC_t* const ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+ const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit;
  LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
  /* avoid const char * -> char * conversion warning :( */
- return (char *)(uptrval)bufferStart;
+ return (char*)(uptrval)bufferStart;
  }


@@ -1325,7 +1341,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
  {
  int retval = 0;
  #define TRAILING_LITERALS 3
- #ifdef LZ4HC_HEAPMODE
+ #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
  LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
  #else
  LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
@@ -1343,7 +1359,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
  const BYTE* ovref = NULL;

  /* init */
- #ifdef LZ4HC_HEAPMODE
+ #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
  if (opt == NULL) goto _return_label;
  #endif
  DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
@@ -1575,7 +1591,7 @@ _last_literals:
  } else {
  *op++ = (BYTE)(lastRunSize << ML_BITS);
  }
- memcpy(op, anchor, lastRunSize);
+ LZ4_memcpy(op, anchor, lastRunSize);
  op += lastRunSize;
  }

@@ -1608,7 +1624,7 @@ if (limit == fillOutput) {
  goto _last_literals;
  }
  _return_label:
- #ifdef LZ4HC_HEAPMODE
+ #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
  FREEMEM(opt);
  #endif
  return retval;
@@ -1,7 +1,7 @@
  /*
  LZ4 HC - High Compression Mode of LZ4
  Header File
- Copyright (C) 2011-2017, Yann Collet.
+ Copyright (C) 2011-2020, Yann Collet.
  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

  Redistribution and use in source and binary forms, with or without
@@ -198,14 +198,17 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
  #define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)


+ /* Never ever use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+ **/
  typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
  struct LZ4HC_CCtx_internal
  {
  LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
  LZ4_u16 chainTable[LZ4HC_MAXD];
  const LZ4_byte* end; /* next block here to continue on current prefix */
- const LZ4_byte* base; /* All index relative to this position */
- const LZ4_byte* dictBase; /* alternate base for extDict */
+ const LZ4_byte* prefixStart; /* Indexes relative to this position */
+ const LZ4_byte* dictStart; /* alternate reference for extDict */
  LZ4_u32 dictLimit; /* below that point, need extDict */
  LZ4_u32 lowLimit; /* below that point, no more dict */
  LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
@@ -216,20 +219,15 @@ struct LZ4HC_CCtx_internal
  const LZ4HC_CCtx_internal* dictCtx;
  };

-
- /* Do not use these definitions directly !
- * Declare or allocate an LZ4_streamHC_t instead.
- */
- #define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
- #define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
+ #define LZ4_STREAMHC_MINSIZE 262200 /* static size, for inter-version compatibility */
  union LZ4_streamHC_u {
- void* table[LZ4_STREAMHCSIZE_VOIDP];
+ char minStateSize[LZ4_STREAMHC_MINSIZE];
  LZ4HC_CCtx_internal internal_donotuse;
  }; /* previously typedef'd to LZ4_streamHC_t */

  /* LZ4_streamHC_t :
  * This structure allows static allocation of LZ4 HC streaming state.
- * This can be used to allocate statically, on state, or as part of a larger structure.
+ * This can be used to allocate statically on stack, or as part of a larger structure.
  *
  * Such state **must** be initialized using LZ4_initStreamHC() before first use.
  *
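
The removed `LZ4_STREAMHCSIZE` macros give way to `LZ4_STREAMHC_MINSIZE`, and the union's spacer member changes, which is why initialising with `LZ4_initStreamHC()` before first use stays mandatory. A short sketch of the caller-provided-memory pattern this header documents (illustrative only; `make_hc_state` and the malloc-based allocation are assumptions, not gem code):

    #include <stdlib.h>
    #include "lz4hc.h"

    /* Illustration: allocate HC streaming state in caller-provided memory.
     * LZ4_STREAMHC_MINSIZE plays the role of the removed LZ4_STREAMHCSIZE;
     * LZ4_initStreamHC() returns NULL if the buffer is unusable. */
    static LZ4_streamHC_t* make_hc_state(void)
    {
        void* const buf = malloc(LZ4_STREAMHC_MINSIZE);
        LZ4_streamHC_t* const s = buf ? LZ4_initStreamHC(buf, LZ4_STREAMHC_MINSIZE) : NULL;
        if (s == NULL) free(buf);    /* init refused the buffer */
        return s;                    /* caller frees the returned pointer when done */
    }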
@@ -244,7 +242,7 @@ union LZ4_streamHC_u {
  * Required before first use of a statically allocated LZ4_streamHC_t.
  * Before v1.9.0 : use LZ4_resetStreamHC() instead
  */
- LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
+ LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC(void* buffer, size_t size);


  /*-************************************
@@ -272,9 +270,11 @@ LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_comp
  * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
  * than preserve a window-sized chunk of history.
  */
+ #if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
  LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
- LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
  LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
+ #endif
+ LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
  LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
  LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
  LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
@@ -305,7 +305,7 @@ LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionL
  * They should not be linked from DLL,
  * as there is no guarantee of API stability yet.
  * Prototypes will be promoted to "stable" status
- * after successfull usage in real-life scenarios.
+ * after successful usage in real-life scenarios.
  ***************************************************/
  #ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
  #ifndef LZ4_HC_SLO_098092834
@@ -315,7 +315,7 @@ struct XXH64_state_s {


  #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
- # include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
+ # include "lz4xxhash.c" /* include xxhash function bodies as `static`, for inlining */
  #endif

  #endif /* XXH_STATIC_LINKING_ONLY */