zstdlib 0.1.2-x86-mingw32 → 0.2.0-x86-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. checksums.yaml +4 -4
  2. data/.yardopts +6 -0
  3. data/CHANGES.md +7 -0
  4. data/README.md +4 -3
  5. data/ext/zstdlib/extconf.rb +1 -1
  6. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/bitstream.h +0 -0
  7. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/compiler.h +1 -1
  8. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/cpu.h +0 -0
  9. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/debug.c +0 -0
  10. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/debug.h +0 -0
  11. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/entropy_common.c +0 -0
  12. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/error_private.c +0 -0
  13. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/error_private.h +0 -0
  14. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/fse.h +1 -1
  15. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/fse_decompress.c +0 -0
  16. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/huf.h +0 -0
  17. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/mem.h +0 -0
  18. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/pool.c +0 -0
  19. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/pool.h +0 -0
  20. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/threading.c +2 -2
  21. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/threading.h +0 -0
  22. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/xxhash.c +2 -2
  23. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/xxhash.h +0 -0
  24. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/zstd_common.c +0 -0
  25. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/zstd_errors.h +0 -0
  26. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/common/zstd_internal.h +55 -2
  27. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/fse_compress.c +2 -2
  28. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/hist.c +0 -0
  29. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/hist.h +0 -0
  30. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/huf_compress.c +0 -0
  31. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_compress.c +423 -296
  32. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_compress_internal.h +14 -11
  33. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_double_fast.c +0 -0
  34. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_double_fast.h +0 -0
  35. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_fast.c +203 -124
  36. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_fast.h +0 -0
  37. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_lazy.c +0 -0
  38. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_lazy.h +1 -1
  39. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_ldm.c +1 -1
  40. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_ldm.h +0 -0
  41. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_opt.c +27 -11
  42. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstd_opt.h +0 -0
  43. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstdmt_compress.c +41 -49
  44. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/compress/zstdmt_compress.h +43 -26
  45. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/huf_decompress.c +0 -0
  46. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_ddict.c +4 -4
  47. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_ddict.h +0 -0
  48. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_decompress.c +257 -164
  49. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_decompress_block.c +51 -47
  50. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_decompress_block.h +0 -0
  51. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/decompress/zstd_decompress_internal.h +7 -0
  52. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/lib/zstd.h +689 -542
  53. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzclose.c +0 -0
  54. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzcompatibility.h +0 -0
  55. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzguts.h +2 -0
  56. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzlib.c +0 -0
  57. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzread.c +0 -0
  58. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/gzwrite.c +0 -0
  59. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/zstd_zlibwrapper.c +0 -0
  60. data/ext/zstdlib/{zstd-1.3.8 → zstd-1.4.0}/zlibWrapper/zstd_zlibwrapper.h +0 -0
  61. data/lib/2.2/zstdlib.so +0 -0
  62. data/lib/2.3/zstdlib.so +0 -0
  63. data/lib/2.4/zstdlib.so +0 -0
  64. data/lib/2.5/zstdlib.so +0 -0
  65. data/lib/2.6/zstdlib.so +0 -0
  66. metadata +58 -57
@@ -56,14 +56,15 @@ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
56
56
  size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
57
57
  blockProperties_t* bpPtr)
58
58
  {
59
- if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
59
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
60
+
60
61
  { U32 const cBlockHeader = MEM_readLE24(src);
61
62
  U32 const cSize = cBlockHeader >> 3;
62
63
  bpPtr->lastBlock = cBlockHeader & 1;
63
64
  bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
64
65
  bpPtr->origSize = cSize; /* only useful for RLE */
65
66
  if (bpPtr->blockType == bt_rle) return 1;
66
- if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
67
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
67
68
  return cSize;
68
69
  }
69
70
  }
@@ -78,7 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
78
79
  size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
79
80
  const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
80
81
  {
81
- if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
82
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
82
83
 
83
84
  { const BYTE* const istart = (const BYTE*) src;
84
85
  symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
@@ -86,11 +87,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
86
87
  switch(litEncType)
87
88
  {
88
89
  case set_repeat:
89
- if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
90
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
90
91
  /* fall-through */
91
92
 
92
93
  case set_compressed:
93
- if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
94
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
94
95
  { size_t lhSize, litSize, litCSize;
95
96
  U32 singleStream=0;
96
97
  U32 const lhlCode = (istart[0] >> 2) & 3;
@@ -118,8 +119,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
118
119
  litCSize = (lhc >> 22) + (istart[4] << 10);
119
120
  break;
120
121
  }
121
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
122
- if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
122
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
123
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
123
124
 
124
125
  /* prefetch huffman table if cold */
125
126
  if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@@ -157,7 +158,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
157
158
  }
158
159
  }
159
160
 
160
- if (HUF_isError(hufSuccess)) return ERROR(corruption_detected);
161
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
161
162
 
162
163
  dctx->litPtr = dctx->litBuffer;
163
164
  dctx->litSize = litSize;
@@ -187,7 +188,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
187
188
  }
188
189
 
189
190
  if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
190
- if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
191
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
191
192
  memcpy(dctx->litBuffer, istart+lhSize, litSize);
192
193
  dctx->litPtr = dctx->litBuffer;
193
194
  dctx->litSize = litSize;
@@ -216,17 +217,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
216
217
  case 3:
217
218
  lhSize = 3;
218
219
  litSize = MEM_readLE24(istart) >> 4;
219
- if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
220
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
220
221
  break;
221
222
  }
222
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
223
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
223
224
  memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
224
225
  dctx->litPtr = dctx->litBuffer;
225
226
  dctx->litSize = litSize;
226
227
  return lhSize+1;
227
228
  }
228
229
  default:
229
- return ERROR(corruption_detected); /* impossible */
230
+ RETURN_ERROR(corruption_detected, "impossible");
230
231
  }
231
232
  }
232
233
  }
@@ -436,8 +437,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
436
437
  switch(type)
437
438
  {
438
439
  case set_rle :
439
- if (!srcSize) return ERROR(srcSize_wrong);
440
- if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
440
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong);
441
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
441
442
  { U32 const symbol = *(const BYTE*)src;
442
443
  U32 const baseline = baseValue[symbol];
443
444
  U32 const nbBits = nbAdditionalBits[symbol];
@@ -449,7 +450,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
449
450
  *DTablePtr = defaultTable;
450
451
  return 0;
451
452
  case set_repeat:
452
- if (!flagRepeatTable) return ERROR(corruption_detected);
453
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
453
454
  /* prefetch FSE table if used */
454
455
  if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
455
456
  const void* const pStart = *DTablePtr;
@@ -461,15 +462,15 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
461
462
  { unsigned tableLog;
462
463
  S16 norm[MaxSeq+1];
463
464
  size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
464
- if (FSE_isError(headerSize)) return ERROR(corruption_detected);
465
- if (tableLog > maxLog) return ERROR(corruption_detected);
465
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
466
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
466
467
  ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
467
468
  *DTablePtr = DTableSpace;
468
469
  return headerSize;
469
470
  }
470
- default : /* impossible */
471
+ default :
471
472
  assert(0);
472
- return ERROR(GENERIC);
473
+ RETURN_ERROR(GENERIC, "impossible");
473
474
  }
474
475
  }
475
476
 
@@ -483,28 +484,28 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
483
484
  DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
484
485
 
485
486
  /* check */
486
- if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
487
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
487
488
 
488
489
  /* SeqHead */
489
490
  nbSeq = *ip++;
490
491
  if (!nbSeq) {
491
492
  *nbSeqPtr=0;
492
- if (srcSize != 1) return ERROR(srcSize_wrong);
493
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
493
494
  return 1;
494
495
  }
495
496
  if (nbSeq > 0x7F) {
496
497
  if (nbSeq == 0xFF) {
497
- if (ip+2 > iend) return ERROR(srcSize_wrong);
498
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
498
499
  nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
499
500
  } else {
500
- if (ip >= iend) return ERROR(srcSize_wrong);
501
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
501
502
  nbSeq = ((nbSeq-0x80)<<8) + *ip++;
502
503
  }
503
504
  }
504
505
  *nbSeqPtr = nbSeq;
505
506
 
506
507
  /* FSE table descriptors */
507
- if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
508
+ RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */
508
509
  { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
509
510
  symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
510
511
  symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
@@ -517,7 +518,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
517
518
  LL_base, LL_bits,
518
519
  LL_defaultDTable, dctx->fseEntropy,
519
520
  dctx->ddictIsCold, nbSeq);
520
- if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
521
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
521
522
  ip += llhSize;
522
523
  }
523
524
 
@@ -527,7 +528,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
527
528
  OF_base, OF_bits,
528
529
  OF_defaultDTable, dctx->fseEntropy,
529
530
  dctx->ddictIsCold, nbSeq);
530
- if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
531
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
531
532
  ip += ofhSize;
532
533
  }
533
534
 
@@ -537,7 +538,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
537
538
  ML_base, ML_bits,
538
539
  ML_defaultDTable, dctx->fseEntropy,
539
540
  dctx->ddictIsCold, nbSeq);
540
- if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
541
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
541
542
  ip += mlhSize;
542
543
  }
543
544
  }
@@ -590,8 +591,8 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
590
591
  const BYTE* match = oLitEnd - sequence.offset;
591
592
 
592
593
  /* check */
593
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must fit within dstBuffer */
594
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* try to read beyond literal buffer */
594
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
595
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
595
596
 
596
597
  /* copy literals */
597
598
  while (op < oLitEnd) *op++ = *(*litPtr)++;
@@ -599,7 +600,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
599
600
  /* copy Match */
600
601
  if (sequence.offset > (size_t)(oLitEnd - base)) {
601
602
  /* offset beyond prefix */
602
- if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
603
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
603
604
  match = dictEnd - (base-match);
604
605
  if (match + sequence.matchLength <= dictEnd) {
605
606
  memmove(oLitEnd, match, sequence.matchLength);
@@ -631,8 +632,8 @@ size_t ZSTD_execSequence(BYTE* op,
631
632
  const BYTE* match = oLitEnd - sequence.offset;
632
633
 
633
634
  /* check */
634
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
635
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
635
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
636
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
636
637
  if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
637
638
 
638
639
  /* copy Literals */
@@ -645,8 +646,7 @@ size_t ZSTD_execSequence(BYTE* op,
645
646
  /* copy Match */
646
647
  if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
647
648
  /* offset beyond prefix -> go into extDict */
648
- if (sequence.offset > (size_t)(oLitEnd - virtualStart))
649
- return ERROR(corruption_detected);
649
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
650
650
  match = dictEnd + (match - prefixStart);
651
651
  if (match + sequence.matchLength <= dictEnd) {
652
652
  memmove(oLitEnd, match, sequence.matchLength);
@@ -712,8 +712,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
712
712
  const BYTE* match = sequence.match;
713
713
 
714
714
  /* check */
715
- if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
716
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
715
+ RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
716
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
717
717
  if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
718
718
 
719
719
  /* copy Literals */
@@ -726,7 +726,7 @@ size_t ZSTD_execSequenceLong(BYTE* op,
726
726
  /* copy Match */
727
727
  if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
728
728
  /* offset beyond prefix */
729
- if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
729
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
730
730
  if (match + sequence.matchLength <= dictEnd) {
731
731
  memmove(oLitEnd, match, sequence.matchLength);
732
732
  return sequenceLength;
@@ -801,7 +801,7 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
801
801
  /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
802
802
  * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
803
803
  * bits before reloading. This value is the maximum number of bytes we read
804
- * after reloading when we are decoding long offets.
804
+ * after reloading when we are decoding long offsets.
805
805
  */
806
806
  #define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
807
807
  (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
@@ -911,7 +911,9 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
911
911
  seqState_t seqState;
912
912
  dctx->fseEntropy = 1;
913
913
  { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
914
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
914
+ RETURN_ERROR_IF(
915
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
916
+ corruption_detected);
915
917
  ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
916
918
  ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
917
919
  ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@@ -927,14 +929,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
927
929
 
928
930
  /* check if reached exact end */
929
931
  DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
930
- if (nbSeq) return ERROR(corruption_detected);
932
+ RETURN_ERROR_IF(nbSeq, corruption_detected);
931
933
  /* save reps for next block */
932
934
  { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
933
935
  }
934
936
 
935
937
  /* last literal segment */
936
938
  { size_t const lastLLSize = litEnd - litPtr;
937
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
939
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
938
940
  memcpy(op, litPtr, lastLLSize);
939
941
  op += lastLLSize;
940
942
  }
@@ -1066,7 +1068,9 @@ ZSTD_decompressSequencesLong_body(
1066
1068
  seqState.pos = (size_t)(op-prefixStart);
1067
1069
  seqState.dictEnd = dictEnd;
1068
1070
  assert(iend >= ip);
1069
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
1071
+ RETURN_ERROR_IF(
1072
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
1073
+ corruption_detected);
1070
1074
  ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1071
1075
  ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1072
1076
  ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@@ -1076,7 +1080,7 @@ ZSTD_decompressSequencesLong_body(
1076
1080
  sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
1077
1081
  PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
1078
1082
  }
1079
- if (seqNb<seqAdvance) return ERROR(corruption_detected);
1083
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
1080
1084
 
1081
1085
  /* decode and decompress */
1082
1086
  for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
@@ -1087,7 +1091,7 @@ ZSTD_decompressSequencesLong_body(
1087
1091
  sequences[seqNb & STORED_SEQS_MASK] = sequence;
1088
1092
  op += oneSeqSize;
1089
1093
  }
1090
- if (seqNb<nbSeq) return ERROR(corruption_detected);
1094
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
1091
1095
 
1092
1096
  /* finish queue */
1093
1097
  seqNb -= seqAdvance;
@@ -1103,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
1103
1107
 
1104
1108
  /* last literal segment */
1105
1109
  { size_t const lastLLSize = litEnd - litPtr;
1106
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
1110
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
1107
1111
  memcpy(op, litPtr, lastLLSize);
1108
1112
  op += lastLLSize;
1109
1113
  }
@@ -1176,7 +1180,7 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
1176
1180
  /* ZSTD_decompressSequencesLong() :
1177
1181
  * decompression function triggered when a minimum share of offsets is considered "long",
1178
1182
  * aka out of cache.
1179
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes mearning "farther than memory cache distance".
1183
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
1180
1184
  * This function will try to mitigate main memory latency through the use of prefetching */
1181
1185
  static size_t
1182
1186
  ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
@@ -1240,7 +1244,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
1240
1244
  ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
1241
1245
  DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
1242
1246
 
1243
- if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
1247
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
1244
1248
 
1245
1249
  /* Decode literals section */
1246
1250
  { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
@@ -89,6 +89,12 @@ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
89
89
  typedef enum { zdss_init=0, zdss_loadHeader,
90
90
  zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
91
91
 
92
+ typedef enum {
93
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
94
+ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
95
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
96
+ } ZSTD_dictUses_e;
97
+
92
98
  struct ZSTD_DCtx_s
93
99
  {
94
100
  const ZSTD_seqSymbol* LLTptr;
@@ -123,6 +129,7 @@ struct ZSTD_DCtx_s
123
129
  const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
124
130
  U32 dictID;
125
131
  int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
132
+ ZSTD_dictUses_e dictUses;
126
133
 
127
134
  /* streaming */
128
135
  ZSTD_dStreamStage streamStage;