extzstd 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. checksums.yaml +4 -4
  2. data/README.md +28 -14
  3. data/contrib/zstd/CHANGELOG +114 -56
  4. data/contrib/zstd/CONTRIBUTING.md +14 -0
  5. data/contrib/zstd/Makefile +37 -31
  6. data/contrib/zstd/README.md +6 -0
  7. data/contrib/zstd/appveyor.yml +4 -1
  8. data/contrib/zstd/lib/Makefile +231 -134
  9. data/contrib/zstd/lib/README.md +28 -0
  10. data/contrib/zstd/lib/common/bitstream.h +24 -15
  11. data/contrib/zstd/lib/common/compiler.h +116 -3
  12. data/contrib/zstd/lib/common/cpu.h +0 -2
  13. data/contrib/zstd/lib/common/debug.h +11 -18
  14. data/contrib/zstd/lib/common/entropy_common.c +188 -42
  15. data/contrib/zstd/lib/common/error_private.c +1 -0
  16. data/contrib/zstd/lib/common/error_private.h +1 -1
  17. data/contrib/zstd/lib/common/fse.h +38 -11
  18. data/contrib/zstd/lib/common/fse_decompress.c +123 -16
  19. data/contrib/zstd/lib/common/huf.h +26 -5
  20. data/contrib/zstd/lib/common/mem.h +66 -93
  21. data/contrib/zstd/lib/common/pool.c +22 -16
  22. data/contrib/zstd/lib/common/pool.h +1 -1
  23. data/contrib/zstd/lib/common/threading.c +6 -5
  24. data/contrib/zstd/lib/common/xxhash.c +18 -56
  25. data/contrib/zstd/lib/common/xxhash.h +1 -1
  26. data/contrib/zstd/lib/common/zstd_common.c +9 -9
  27. data/contrib/zstd/lib/common/zstd_deps.h +111 -0
  28. data/contrib/zstd/lib/common/zstd_errors.h +1 -0
  29. data/contrib/zstd/lib/common/zstd_internal.h +89 -58
  30. data/contrib/zstd/lib/compress/fse_compress.c +30 -23
  31. data/contrib/zstd/lib/compress/hist.c +26 -28
  32. data/contrib/zstd/lib/compress/hist.h +1 -1
  33. data/contrib/zstd/lib/compress/huf_compress.c +210 -95
  34. data/contrib/zstd/lib/compress/zstd_compress.c +1339 -409
  35. data/contrib/zstd/lib/compress/zstd_compress_internal.h +119 -41
  36. data/contrib/zstd/lib/compress/zstd_compress_literals.c +4 -4
  37. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +17 -3
  38. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +23 -19
  39. data/contrib/zstd/lib/compress/zstd_cwksp.h +60 -24
  40. data/contrib/zstd/lib/compress/zstd_double_fast.c +22 -22
  41. data/contrib/zstd/lib/compress/zstd_fast.c +19 -19
  42. data/contrib/zstd/lib/compress/zstd_lazy.c +351 -77
  43. data/contrib/zstd/lib/compress/zstd_lazy.h +20 -0
  44. data/contrib/zstd/lib/compress/zstd_ldm.c +59 -18
  45. data/contrib/zstd/lib/compress/zstd_ldm.h +6 -0
  46. data/contrib/zstd/lib/compress/zstd_opt.c +190 -45
  47. data/contrib/zstd/lib/compress/zstdmt_compress.c +74 -406
  48. data/contrib/zstd/lib/compress/zstdmt_compress.h +26 -108
  49. data/contrib/zstd/lib/decompress/huf_decompress.c +302 -200
  50. data/contrib/zstd/lib/decompress/zstd_ddict.c +8 -8
  51. data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
  52. data/contrib/zstd/lib/decompress/zstd_decompress.c +125 -80
  53. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +145 -37
  54. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +5 -2
  55. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +11 -10
  56. data/contrib/zstd/lib/dictBuilder/cover.c +29 -20
  57. data/contrib/zstd/lib/dictBuilder/cover.h +1 -1
  58. data/contrib/zstd/lib/dictBuilder/fastcover.c +20 -19
  59. data/contrib/zstd/lib/dictBuilder/zdict.c +15 -16
  60. data/contrib/zstd/lib/dictBuilder/zdict.h +1 -1
  61. data/contrib/zstd/lib/legacy/zstd_v01.c +5 -1
  62. data/contrib/zstd/lib/legacy/zstd_v02.c +5 -1
  63. data/contrib/zstd/lib/legacy/zstd_v03.c +5 -1
  64. data/contrib/zstd/lib/legacy/zstd_v04.c +6 -2
  65. data/contrib/zstd/lib/legacy/zstd_v05.c +5 -1
  66. data/contrib/zstd/lib/legacy/zstd_v06.c +5 -1
  67. data/contrib/zstd/lib/legacy/zstd_v07.c +5 -1
  68. data/contrib/zstd/lib/libzstd.pc.in +3 -3
  69. data/contrib/zstd/lib/zstd.h +348 -47
  70. data/ext/extzstd.c +6 -0
  71. data/ext/extzstd.h +6 -0
  72. data/gemstub.rb +3 -21
  73. data/lib/extzstd.rb +0 -2
  74. data/lib/extzstd/version.rb +6 -1
  75. data/test/test_basic.rb +0 -5
  76. metadata +5 -4
data/contrib/zstd/lib/compress/zstd_cwksp.h
@@ -44,6 +44,16 @@ typedef enum {
  ZSTD_cwksp_alloc_aligned
  } ZSTD_cwksp_alloc_phase_e;

+ /**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+ typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+ } ZSTD_cwksp_static_alloc_e;
+
  /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -92,7 +102,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  * so that literally everything fits in a single buffer. Note: if present,
- * this must be the first object in the workspace, since ZSTD_free{CCtx,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  * CDict}() rely on a pointer comparison to see whether one or two frees are
  * required.
  *
@@ -137,9 +147,10 @@ typedef struct {
  void* tableValidEnd;
  void* allocStart;

- int allocFailed;
+ BYTE allocFailed;
  int workspaceOversizedDuration;
  ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
  } ZSTD_cwksp;

  /*-*************************************
@@ -178,7 +189,9 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * else is though.
  */
  MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (size == 0)
+ return 0;
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #else
  return size;
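Note on the sanitizer guard change in this and the following hunks: the #if now tests ZSTD_ADDRESS_SANITIZER (and, further down, ZSTD_MEMORY_SANITIZER) as an always-defined 0/1 macro instead of checking defined(ADDRESS_SANITIZER). The sketch below is not part of this diff; zstd 1.4.7 defines the macro roughly this way in lib/common/compiler.h, which this release also touches:

/* Illustrative sketch only: detect ASAN once and expose it as a 0/1 macro
 * so that a plain "#if ZSTD_ADDRESS_SANITIZER" works in every translation unit. */
#ifndef __has_feature
#  define __has_feature(x) 0   /* fallback for compilers without __has_feature */
#endif
#ifndef ZSTD_ADDRESS_SANITIZER
#  if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
#    define ZSTD_ADDRESS_SANITIZER 1   /* Clang feature check / GCC -fsanitize=address */
#  else
#    define ZSTD_ADDRESS_SANITIZER 0
#  endif
#endif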
@@ -228,7 +241,10 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
  ZSTD_cwksp_internal_advance_phase(ws, phase);
  alloc = (BYTE *)ws->allocStart - bytes;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (bytes == 0)
+ return NULL;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* over-reserve space */
  alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #endif
@@ -247,11 +263,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
  }
  ws->allocStart = alloc;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
  * either size. */
  alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
  #endif

  return alloc;
@@ -296,8 +314,10 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
  }
  ws->tableEnd = end;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- __asan_unpoison_memory_region(alloc, bytes);
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
  #endif

  return alloc;
@@ -311,7 +331,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  void* alloc = ws->objectEnd;
  void* end = (BYTE*)alloc + roundedBytes;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* over-reserve space */
  end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #endif
@@ -332,11 +352,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  ws->tableEnd = end;
  ws->tableValidEnd = end;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
  * either size. */
  alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
  #endif

  return alloc;
@@ -345,7 +367,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
  DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

- #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
  /* To validate that the table re-use logic is sound, and that we don't
  * access table space that we haven't cleaned, we re-"poison" the table
  * space every time we mark it dirty. */
@@ -380,7 +402,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
  assert(ws->tableValidEnd >= ws->objectEnd);
  assert(ws->tableValidEnd <= ws->allocStart);
  if (ws->tableValidEnd < ws->tableEnd) {
- memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
  }
  ZSTD_cwksp_mark_tables_clean(ws);
  }
@@ -392,8 +414,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
  MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
  DEBUGLOG(4, "cwksp: clearing tables!");

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
  size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
  __asan_poison_memory_region(ws->objectEnd, size);
  }
@@ -410,7 +436,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
  MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  DEBUGLOG(4, "cwksp: clearing!");

- #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
  /* To validate that the context re-use logic is sound, and that we don't
  * access stuff that this compression hasn't initialized, we re-"poison"
  * the workspace (or at least the non-static, non-table parts of it)
@@ -421,8 +447,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  }
  #endif

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
  size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
  __asan_poison_memory_region(ws->objectEnd, size);
  }
@@ -442,7 +472,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
- MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
  DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
  assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
  ws->workspace = start;
@@ -450,24 +480,25 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
  ws->objectEnd = ws->workspace;
  ws->tableValidEnd = ws->objectEnd;
  ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
  ZSTD_cwksp_clear(ws);
  ws->workspaceOversizedDuration = 0;
  ZSTD_cwksp_assert_internal_consistency(ws);
  }

  MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
- void* workspace = ZSTD_malloc(size, customMem);
+ void* workspace = ZSTD_customMalloc(size, customMem);
  DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
  RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
- ZSTD_cwksp_init(ws, workspace, size);
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
  return 0;
  }

  MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
  void *ptr = ws->workspace;
  DEBUGLOG(4, "cwksp: freeing workspace");
- memset(ws, 0, sizeof(ZSTD_cwksp));
- ZSTD_free(ptr, customMem);
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
  }

  /**
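The allocator and memset changes in the hunk above follow two library-wide renames in this release: the ZSTD_customMem-aware helpers are now ZSTD_customMalloc/ZSTD_customFree, and direct libc calls are routed through ZSTD_memset/ZSTD_memcpy wrappers provided by the new lib/common/zstd_deps.h (added in this diff with +111 lines). The fragment below is only a sketch of that wrapper idea, not the actual header contents:

/* Illustrative sketch: keep libc dependencies behind one indirection so that
 * freestanding builds can remap them without touching the rest of the library. */
#include <string.h>   /* memset, memcpy */
#define ZSTD_memset(p, v, l)  memset((p), (v), (l))
#define ZSTD_memcpy(d, s, l)  memcpy((d), (s), (l))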
@@ -476,13 +507,18 @@ MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
  */
  MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
  *dst = *src;
- memset(src, 0, sizeof(ZSTD_cwksp));
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
  }

  MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
  return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
  }

+ MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+ }
+
  MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
  return ws->allocFailed;
  }
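Taken together, the zstd_cwksp.h hunks thread an isStatic tag through the workspace: ZSTD_cwksp_create() marks its own allocation as ZSTD_cwksp_dynamic_alloc, and the ASAN poison/unpoison calls now run only for dynamic workspaces, since a statically supplied buffer cannot be unpoisoned at the end of its lifetime. The snippet below is a hypothetical caller, not code from this diff, showing how a user-owned buffer would be wrapped with the new ZSTD_cwksp_init() signature:

/* Hypothetical helper (illustration only): wrap a caller-owned, pointer-aligned
 * buffer as a static workspace. cwksp will never free or ASAN-poison it. */
#include "zstd_cwksp.h"   /* assumed include path for the header patched above */

static void example_wrap_static_workspace(ZSTD_cwksp* ws, void* buffer, size_t bufferSize)
{
    ZSTD_cwksp_init(ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
}

By contrast, ZSTD_cwksp_create() in the hunk above passes ZSTD_cwksp_dynamic_alloc for the buffer it obtains from ZSTD_customMalloc().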
data/contrib/zstd/lib/compress/zstd_double_fast.c
@@ -31,15 +31,15 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
  * is empty.
  */
  for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
- U32 const current = (U32)(ip - base);
+ U32 const curr = (U32)(ip - base);
  U32 i;
  for (i = 0; i < fastHashFillStep; ++i) {
  size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
  size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
  if (i == 0)
- hashSmall[smHash] = current + i;
+ hashSmall[smHash] = curr + i;
  if (i == 0 || hashLarge[lgHash] == 0)
- hashLarge[lgHash] = current + i;
+ hashLarge[lgHash] = curr + i;
  /* Only load extra positions for ZSTD_dtlm_full */
  if (dtlm == ZSTD_dtlm_fast)
  break;
@@ -108,9 +108,9 @@ size_t ZSTD_compressBlock_doubleFast_generic(
  /* init */
  ip += (dictAndPrefixLength == 0);
  if (dictMode == ZSTD_noDict) {
- U32 const current = (U32)(ip - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
- U32 const maxRep = current - windowLow;
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
  if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
  if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
  }
@@ -129,17 +129,17 @@ size_t ZSTD_compressBlock_doubleFast_generic(
  size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
  size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
  size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
  U32 const matchIndexL = hashLong[h2];
  U32 matchIndexS = hashSmall[h];
  const BYTE* matchLong = base + matchIndexL;
  const BYTE* match = base + matchIndexS;
- const U32 repIndex = current + 1 - offset_1;
+ const U32 repIndex = curr + 1 - offset_1;
  const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
  && repIndex < prefixLowestIndex) ?
  dictBase + (repIndex - dictIndexDelta) :
  base + repIndex;
- hashLong[h2] = hashSmall[h] = current; /* update hash tables */
+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */

  /* check dictMatchState repcode */
  if (dictMode == ZSTD_dictMatchState
@@ -177,7 +177,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(

  if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
  mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
- offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+ offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
  while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
  goto _match_found;
  } }
@@ -209,7 +209,7 @@ _search_next_long:
  size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
  U32 const matchIndexL3 = hashLong[hl3];
  const BYTE* matchL3 = base + matchIndexL3;
- hashLong[hl3] = current + 1;
+ hashLong[hl3] = curr + 1;

  /* check prefix long +1 match */
  if (matchIndexL3 > prefixLowestIndex) {
@@ -228,7 +228,7 @@ _search_next_long:
  if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
  mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
  ip++;
- offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+ offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
  while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
  goto _match_found;
  } } }
@@ -236,7 +236,7 @@ _search_next_long:
  /* if no long +1 match, explore the short match we found */
  if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
  mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
- offset = (U32)(current - matchIndexS);
+ offset = (U32)(curr - matchIndexS);
  while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
  } else {
  mLength = ZSTD_count(ip+4, match+4, iend) + 4;
@@ -260,7 +260,7 @@ _match_stored:
  if (ip <= ilimit) {
  /* Complementary insertion */
  /* done after iLimit test, as candidates could be > iend-8 */
- { U32 const indexToInsert = current+2;
+ { U32 const indexToInsert = curr+2;
  hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
  hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
  hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
@@ -401,12 +401,12 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
  const BYTE* matchLong = matchLongBase + matchLongIndex;

- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
  const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
  const BYTE* const repMatch = repBase + repIndex;
  size_t mLength;
- hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */

  if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
  & (repIndex > dictStartIndex))
@@ -421,7 +421,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
  U32 offset;
  mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
- offset = current - matchLongIndex;
+ offset = curr - matchLongIndex;
  while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
  offset_2 = offset_1;
  offset_1 = offset;
@@ -433,19 +433,19 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
  const BYTE* match3 = match3Base + matchIndex3;
  U32 offset;
- hashLong[h3] = current + 1;
+ hashLong[h3] = curr + 1;
  if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
  const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
  const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
  mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
  ip++;
- offset = current+1 - matchIndex3;
+ offset = curr+1 - matchIndex3;
  while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
  } else {
  const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
  const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
  mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
- offset = current - matchIndex;
+ offset = curr - matchIndex;
  while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
  }
  offset_2 = offset_1;
@@ -464,7 +464,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  if (ip <= ilimit) {
  /* Complementary insertion */
  /* done after iLimit test, as candidates could be > iend-8 */
- { U32 const indexToInsert = current+2;
+ { U32 const indexToInsert = curr+2;
  hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
  hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
  hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
data/contrib/zstd/lib/compress/zstd_fast.c
@@ -29,16 +29,16 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
  * Insert the other positions if their hash entry is empty.
  */
  for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
- U32 const current = (U32)(ip - base);
+ U32 const curr = (U32)(ip - base);
  size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
- hashTable[hash0] = current;
+ hashTable[hash0] = curr;
  if (dtlm == ZSTD_dtlm_fast) continue;
  /* Only load extra positions for ZSTD_dtlm_full */
  { U32 p;
  for (p = 1; p < fastHashFillStep; ++p) {
  size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
  if (hashTable[hash] == 0) { /* not yet filled */
- hashTable[hash] = current + p;
+ hashTable[hash] = curr + p;
  } } } }
  }

@@ -72,9 +72,9 @@ ZSTD_compressBlock_fast_generic(
  DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
  ip0 += (ip0 == prefixStart);
  ip1 = ip0 + 1;
- { U32 const current = (U32)(ip0 - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
- U32 const maxRep = current - windowLow;
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
  if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
  if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
  }
@@ -258,14 +258,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
  size_t mLength;
  size_t const h = ZSTD_hashPtr(ip, hlog, mls);
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
  U32 const matchIndex = hashTable[h];
  const BYTE* match = base + matchIndex;
- const U32 repIndex = current + 1 - offset_1;
+ const U32 repIndex = curr + 1 - offset_1;
  const BYTE* repMatch = (repIndex < prefixStartIndex) ?
  dictBase + (repIndex - dictIndexDelta) :
  base + repIndex;
- hashTable[h] = current; /* update hash table */
+ hashTable[h] = curr; /* update hash table */

  if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
  && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
@@ -284,7 +284,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  continue;
  } else {
  /* found a dict match */
- U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
  mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
  while (((ip>anchor) & (dictMatch>dictStart))
  && (ip[-1] == dictMatch[-1])) {
@@ -316,8 +316,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(

  if (ip <= ilimit) {
  /* Fill Table */
- assert(base+current+2 > istart); /* check base overflow */
- hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ assert(base+curr+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
  hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

  /* check immediate repcode */
@@ -410,13 +410,13 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  const U32 matchIndex = hashTable[h];
  const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
  const BYTE* match = matchBase + matchIndex;
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1;
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1;
  const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
  const BYTE* const repMatch = repBase + repIndex;
- hashTable[h] = current; /* update hash table */
- DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current);
- assert(offset_1 <= current +1); /* check repIndex */
+ hashTable[h] = curr; /* update hash table */
+ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
+ assert(offset_1 <= curr +1); /* check repIndex */

  if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
  && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
@@ -435,7 +435,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  }
  { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
  const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
- U32 const offset = current - matchIndex;
+ U32 const offset = curr - matchIndex;
  size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
  while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
  offset_2 = offset_1; offset_1 = offset; /* update offset history */
@@ -446,7 +446,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(

  if (ip <= ilimit) {
  /* Fill Table */
- hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
  hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
  /* check immediate repcode */
  while (ip <= ilimit) {