zstd-ruby 1.5.0.0 → 1.5.1.0

Files changed (62)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ruby.yml +2 -2
  3. data/README.md +1 -1
  4. data/ext/zstdruby/extconf.rb +1 -0
  5. data/ext/zstdruby/libzstd/Makefile +50 -175
  6. data/ext/zstdruby/libzstd/README.md +7 -1
  7. data/ext/zstdruby/libzstd/common/bitstream.h +24 -9
  8. data/ext/zstdruby/libzstd/common/compiler.h +89 -43
  9. data/ext/zstdruby/libzstd/common/entropy_common.c +11 -5
  10. data/ext/zstdruby/libzstd/common/error_private.h +79 -0
  11. data/ext/zstdruby/libzstd/common/fse.h +2 -1
  12. data/ext/zstdruby/libzstd/common/fse_decompress.c +1 -1
  13. data/ext/zstdruby/libzstd/common/huf.h +24 -22
  14. data/ext/zstdruby/libzstd/common/mem.h +18 -0
  15. data/ext/zstdruby/libzstd/common/portability_macros.h +131 -0
  16. data/ext/zstdruby/libzstd/common/xxhash.c +5 -805
  17. data/ext/zstdruby/libzstd/common/xxhash.h +5568 -167
  18. data/ext/zstdruby/libzstd/common/zstd_internal.h +92 -88
  19. data/ext/zstdruby/libzstd/common/zstd_trace.h +12 -3
  20. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  21. data/ext/zstdruby/libzstd/compress/fse_compress.c +63 -27
  22. data/ext/zstdruby/libzstd/compress/huf_compress.c +537 -104
  23. data/ext/zstdruby/libzstd/compress/zstd_compress.c +194 -278
  24. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +102 -44
  25. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +4 -3
  26. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +3 -1
  27. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +5 -4
  28. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +3 -2
  29. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +3 -3
  30. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +289 -114
  31. data/ext/zstdruby/libzstd/compress/zstd_fast.c +302 -123
  32. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +418 -502
  33. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +4 -4
  34. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
  35. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +4 -1
  36. data/ext/zstdruby/libzstd/compress/zstd_opt.c +186 -108
  37. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +59 -29
  38. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +727 -189
  39. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +571 -0
  40. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +85 -22
  41. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +744 -220
  42. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +8 -2
  43. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +34 -3
  44. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +23 -3
  45. data/ext/zstdruby/libzstd/dictBuilder/cover.c +9 -2
  46. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +11 -4
  47. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +99 -28
  48. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +2 -6
  49. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +3 -7
  50. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +3 -7
  51. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +3 -7
  52. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +3 -7
  53. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +3 -7
  54. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +3 -7
  55. data/ext/zstdruby/libzstd/libzstd.mk +185 -0
  56. data/ext/zstdruby/libzstd/libzstd.pc.in +1 -0
  57. data/ext/zstdruby/libzstd/modulemap/module.modulemap +4 -0
  58. data/ext/zstdruby/libzstd/zdict.h +4 -4
  59. data/ext/zstdruby/libzstd/zstd.h +179 -136
  60. data/ext/zstdruby/zstdruby.c +2 -2
  61. data/lib/zstd-ruby/version.rb +1 -1
  62. metadata +8 -3
data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h
@@ -63,7 +63,7 @@ typedef struct {
 } ZSTD_localDict;
 
 typedef struct {
-    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
+    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
     HUF_repeat repeatMode;
 } ZSTD_hufCTables_t;
 
@@ -179,7 +179,7 @@ typedef struct {
     U32 offCodeSumBasePrice;     /* to compare to log2(offreq)  */
     ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
     const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;
 } optState_t;
 
 typedef struct {
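
Note: this and several hunks below migrate fields from single-purpose types (ZSTD_literalCompressionMode_e, ZSTD_useRowMatchFinderMode_e, plain int flags) to one shared tri-state switch. For context, ZSTD_paramSwitch_e is declared in the experimental section of upstream zstd v1.5.1's zstd.h essentially as follows (reproduced here for reference, not part of this diff):

typedef enum {
    ZSTD_ps_auto = 0,     /* Let the library decide whether to enable the feature */
    ZSTD_ps_enable = 1,   /* Force-enable the feature */
    ZSTD_ps_disable = 2   /* Do not use the feature */
} ZSTD_paramSwitch_e;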
@@ -199,6 +199,8 @@ typedef struct {
      */
 } ZSTD_window_t;
 
+#define ZSTD_WINDOW_START_INDEX 2
+
 typedef struct ZSTD_matchState_t ZSTD_matchState_t;
 
 #define ZSTD_ROW_HASH_CACHE_SIZE 8       /* Size of prefetching hash cache for row-based matchfinder */
@@ -264,7 +266,7 @@ typedef struct {
 } ldmState_t;
 
 typedef struct {
-    U32 enableLdm;          /* 1 if enable long distance matching */
+    ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
     U32 hashLog;            /* Log size of hashTable */
     U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
     U32 minMatchLength;     /* Minimum match length */
@@ -295,7 +297,7 @@ struct ZSTD_CCtx_params_s {
                             * There is no guarantee that hint is close to actual source size */
 
     ZSTD_dictAttachPref_e attachDictPref;
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;
 
     /* Multithreading: used to pass parameters to mtctx */
     int nbWorkers;
@@ -318,10 +320,10 @@ struct ZSTD_CCtx_params_s {
     int validateSequences;
 
     /* Block splitting */
-    int splitBlocks;
+    ZSTD_paramSwitch_e useBlockSplitter;
 
     /* Param for deciding whether to use row-based matchfinder */
-    ZSTD_useRowMatchFinderMode_e useRowMatchFinder;
+    ZSTD_paramSwitch_e useRowMatchFinder;
 
     /* Always load a dictionary in ext-dict mode (not prefix mode)? */
     int deterministicRefPrefix;
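
These fields are driven through the CCtx parameter API when the experimental header surface is exposed. A minimal sketch, assuming the v1.5.1 experimental parameter names ZSTD_c_useBlockSplitter and ZSTD_c_useRowMatchFinder (check zstd.h for the authoritative names):

#define ZSTD_STATIC_LINKING_ONLY   /* expose the experimental parameter names */
#include <zstd.h>

/* Sketch: drive the new ZSTD_paramSwitch_e fields through the public CCtx API. */
static ZSTD_CCtx* make_cctx_with_switches(void)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return NULL;
    /* ZSTD_ps_auto is the default; set explicitly only to override the heuristic. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_auto);
    return cctx;
}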
@@ -343,6 +345,22 @@ typedef enum {
     ZSTDb_buffered
 } ZSTD_buffered_policy_e;
 
+/**
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+    seqStore_t fullSeqStoreChunk;
+    seqStore_t firstHalfSeqStore;
+    seqStore_t secondHalfSeqStore;
+    seqStore_t currSeqStore;
+    seqStore_t nextSeqStore;
+
+    U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+    ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
 struct ZSTD_CCtx_s {
     ZSTD_compressionStage_e stage;
     int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
@@ -374,7 +392,7 @@ struct ZSTD_CCtx_s {
     ZSTD_blockState_t blockState;
     U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
 
-    /* Wether we are streaming or not */
+    /* Whether we are streaming or not */
     ZSTD_buffered_policy_e bufferedPolicy;
 
     /* streaming */
@@ -408,6 +426,9 @@ struct ZSTD_CCtx_s {
 #if ZSTD_TRACE
     ZSTD_TraceCtx traceCtx;
 #endif
+
+    /* Workspace for block splitter */
+    ZSTD_blockSplitCtx blockSplitCtx;
 };
 
 typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
@@ -442,7 +463,7 @@ typedef enum {
 typedef size_t (*ZSTD_blockCompressor) (
         ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
 
 
 MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -549,17 +570,17 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
     return (srcSize >> minlog) + 2;
 }
 
-MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
 {
     switch (cctxParams->literalCompressionMode) {
-    case ZSTD_lcm_huffman:
+    case ZSTD_ps_enable:
         return 0;
-    case ZSTD_lcm_uncompressed:
+    case ZSTD_ps_disable:
         return 1;
     default:
         assert(0 /* impossible: pre-validated */);
-        /* fall-through */
-    case ZSTD_lcm_auto:
+        ZSTD_FALLTHROUGH;
+    case ZSTD_ps_auto:
         return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
     }
 }
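
ZSTD_FALLTHROUGH replaces the bare /* fall-through */ comment so that compilers warning on implicit switch fallthrough (e.g. -Wimplicit-fallthrough) see an explicit annotation. It comes from common/compiler.h (one of the files changed above); its definition is approximately the following sketch, simplified from the upstream attribute-detection macros:

/* Approximate shape only; see common/compiler.h for the real detection logic. */
#if defined(__cplusplus) && (__cplusplus >= 201703L)
#  define ZSTD_FALLTHROUGH [[fallthrough]]
#elif defined(__GNUC__) && (__GNUC__ >= 7)
#  define ZSTD_FALLTHROUGH __attribute__((__fallthrough__))
#else
#  define ZSTD_FALLTHROUGH /* fall-through */
#endif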
@@ -651,8 +672,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       if STATIC_BMI2
             return _tzcnt_u64(val) >> 3;
 #       else
-            unsigned long r = 0;
-            return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanForward64(&r, (U64)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 4)
             return (__builtin_ctzll((U64)val) >> 3);
@@ -669,8 +696,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       endif
         } else { /* 32 bits */
 #   if defined(_MSC_VER)
-            unsigned long r=0;
-            return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanForward(&r, (U32)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)
             return (__builtin_ctz((U32)val) >> 3);
 #   else
@@ -687,8 +720,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       if STATIC_BMI2
             return _lzcnt_u64(val) >> 3;
 #       else
-            unsigned long r = 0;
-            return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanReverse64(&r, (U64)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 4)
             return (__builtin_clzll(val) >> 3);
@@ -702,8 +741,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       endif
         } else { /* 32 bits */
 #   if defined(_MSC_VER)
-            unsigned long r = 0;
-            return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanReverse(&r, (unsigned long)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)
             return (__builtin_clz((U32)val) >> 3);
 #   else
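
All four MSVC hunks above apply the same transformation: ZSTD_NbCommonBytes is only ever called with val != 0, so instead of initializing r and testing _BitScanForward's boolean result, the rewrite marks the zero case unreachable with MSVC's __assume(0), letting the optimizer drop the dead branch. A minimal portable sketch of the pattern, with hypothetical names:

#include <stdint.h>
#if defined(_MSC_VER)
#  include <intrin.h>
#endif

/* Index (in bytes) of the lowest nonzero byte. Precondition: v != 0. */
static unsigned first_nonzero_byte(uint64_t v)
{
#if defined(_MSC_VER) && defined(_WIN64)
    unsigned long r;
    _BitScanForward64(&r, v);   /* result is defined because v != 0 */
    return (unsigned)(r >> 3);  /* bit index -> byte index */
#elif defined(__GNUC__)
    return (unsigned)(__builtin_ctzll(v) >> 3);
#else
    unsigned n = 0;
    while ((v & 0xFF) == 0) { v >>= 8; n++; }  /* portable fallback */
    return n;
#endif
}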
@@ -884,9 +929,9 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
 
 MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
 {
-    return window.dictLimit == 1 &&
-           window.lowLimit == 1 &&
-           (window.nextSrc - window.base) == 1;
+    return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+           window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+           (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
 }
 
 /**
@@ -937,7 +982,9 @@ MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
 {
     U32 const cycleSize = 1u << cycleLog;
     U32 const curr = (U32)((BYTE const*)src - window.base);
-    U32 const minIndexToOverflowCorrect = cycleSize + MAX(maxDist, cycleSize);
+    U32 const minIndexToOverflowCorrect = cycleSize
+                                        + MAX(maxDist, cycleSize)
+                                        + ZSTD_WINDOW_START_INDEX;
 
     /* Adjust the min index to backoff the overflow correction frequency,
      * so we don't waste too much CPU in overflow correction. If this
@@ -1012,10 +1059,14 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
     U32 const cycleSize = 1u << cycleLog;
     U32 const cycleMask = cycleSize - 1;
     U32 const curr = (U32)((BYTE const*)src - window->base);
-    U32 const currentCycle0 = curr & cycleMask;
-    /* Exclude zero so that newCurrent - maxDist >= 1. */
-    U32 const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0;
-    U32 const newCurrent = currentCycle1 + MAX(maxDist, cycleSize);
+    U32 const currentCycle = curr & cycleMask;
+    /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+    U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+                                     ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+                                     : 0;
+    U32 const newCurrent = currentCycle
+                         + currentCycleCorrection
+                         + MAX(maxDist, cycleSize);
     U32 const correction = curr - newCurrent;
     /* maxDist must be a power of two so that:
      *   (newCurrent & cycleMask) == (curr & cycleMask)
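
A standalone re-derivation of the invariants the new computation relies on, as a sketch (not part of the diff); it assumes cycleLog >= 1 and maxDist a power of two, as the surrounding code requires, and all names are local to the sketch:

#include <assert.h>
#include <stdint.h>

#define SKETCH_WINDOW_START_INDEX 2
#define SKETCH_MAX(a, b) ((a) > (b) ? (a) : (b))

static uint32_t sketch_newCurrent(uint32_t curr, uint32_t cycleLog, uint32_t maxDist)
{
    uint32_t const cycleSize = 1u << cycleLog;
    uint32_t const cycleMask = cycleSize - 1;
    uint32_t const currentCycle = curr & cycleMask;
    /* Only pad when the low bits alone would leave newCurrent - maxDist below 2. */
    uint32_t const correctionTerm = currentCycle < SKETCH_WINDOW_START_INDEX
                                  ? SKETCH_MAX(cycleSize, SKETCH_WINDOW_START_INDEX)
                                  : 0;
    uint32_t const newCurrent = currentCycle + correctionTerm + SKETCH_MAX(maxDist, cycleSize);
    /* Power-of-two sizes keep the position's cycle phase unchanged... */
    assert((newCurrent & cycleMask) == (curr & cycleMask));
    /* ...while keeping the reserved indices 0 and 1 out of the valid window. */
    assert(newCurrent - maxDist >= SKETCH_WINDOW_START_INDEX);
    return newCurrent;
}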
@@ -1031,14 +1082,20 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
 
     window->base += correction;
     window->dictBase += correction;
-    if (window->lowLimit <= correction) window->lowLimit = 1;
-    else window->lowLimit -= correction;
-    if (window->dictLimit <= correction) window->dictLimit = 1;
-    else window->dictLimit -= correction;
+    if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->lowLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->lowLimit -= correction;
+    }
+    if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->dictLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->dictLimit -= correction;
+    }
 
     /* Ensure we can still reference the full window. */
     assert(newCurrent >= maxDist);
-    assert(newCurrent - maxDist >= 1);
+    assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
     /* Ensure that lowLimit and dictLimit didn't underflow. */
     assert(window->lowLimit <= newCurrent);
    assert(window->dictLimit <= newCurrent);
@@ -1149,11 +1206,12 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
 
 MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
     ZSTD_memset(window, 0, sizeof(*window));
-    window->base = (BYTE const*)"";
-    window->dictBase = (BYTE const*)"";
-    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    window->nextSrc = window->base + 1;   /* see issue #1241 */
+    window->base = (BYTE const*)" ";
+    window->dictBase = (BYTE const*)" ";
+    ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+    window->dictLimit = ZSTD_WINDOW_START_INDEX;    /* start from >0, so that 1st position is valid */
+    window->lowLimit = ZSTD_WINDOW_START_INDEX;     /* it ensures first and later CCtx usages compress the same */
+    window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX;   /* see issue #1241 */
     window->nbOverflowCorrections = 0;
 }
 
@@ -1206,15 +1264,15 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
  */
 MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
 {
-    U32    const maxDistance = 1U << windowLog;
-    U32    const lowestValid = ms->window.lowLimit;
-    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
-    U32    const isDictionary = (ms->loadedDictEnd != 0);
+    U32 const maxDistance = 1U << windowLog;
+    U32 const lowestValid = ms->window.lowLimit;
+    U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+    U32 const isDictionary = (ms->loadedDictEnd != 0);
     /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
      * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
      * valid for the entire block. So this check is sufficient to find the lowest valid match index.
      */
-    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
+    U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
     return matchLowest;
 }
 
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c
@@ -73,7 +73,8 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                        const int bmi2)
+                        const int bmi2,
+                        unsigned suspectUncompressible)
 {
     size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
@@ -105,11 +106,11 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                 HUF_compress1X_repeat(
                     ostart+lhSize, dstCapacity-lhSize, src, srcSize,
                     HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                    (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
+                    (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
                 HUF_compress4X_repeat(
                     ostart+lhSize, dstCapacity-lhSize, src, srcSize,
                     HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                    (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+                    (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
     if (repeat != HUF_repeat_none) {
         /* reused the existing table */
         DEBUGLOG(5, "Reusing previous huffman table");
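
The new suspectUncompressible argument threads down into the Huffman layer; in v1.5.1's common/huf.h (also updated above, +24 -22) the 4-streams variant is declared along these lines (shown for context; see huf.h for the authoritative prototype):

/* When suspectUncompressible != 0, cheap sampling checks may skip
 * Huffman coding entirely for likely-incompressible input. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                             const void* src, size_t srcSize,
                             unsigned maxSymbolValue, unsigned tableLog,
                             void* workSpace, size_t wkspSize,
                             HUF_CElt* hufTable, HUF_repeat* repeat,
                             int preferRepeat, int bmi2,
                             unsigned suspectUncompressible);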
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h
@@ -18,12 +18,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
 
 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
+/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               ZSTD_hufCTables_t* nextHuf,
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                        const int bmi2);
+                        const int bmi2,
+                        unsigned suspectUncompressible);
 
 #endif /* ZSTD_COMPRESS_LITERALS_H */
data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c
@@ -275,10 +275,11 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
         assert(nbSeq_1 > 1);
         assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
         (void)entropyWorkspaceSize;
-        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
-        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
+        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+        assert(oend >= op);
+        {   size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog);   /* overflow protected */
             FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
-            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
             return NCountSize;
         }
     }
@@ -398,7 +399,7 @@ ZSTD_encodeSequences_default(
 
 #if DYNAMIC_BMI2
 
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
 ZSTD_encodeSequences_bmi2(
     void* dst, size_t dstCapacity,
     FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c
@@ -132,6 +132,7 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
     const seqDef* sp = sstart;
     size_t matchLengthSum = 0;
     size_t litLengthSum = 0;
+    (void)(litLengthSum); /* suppress unused variable warning on some environments */
     while (send-sp > 0) {
         ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
         litLengthSum += seqLen.litLength;
@@ -324,7 +325,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit
 static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                         const BYTE* codeTable, unsigned maxCode,
                         size_t nbSeq, const FSE_CTable* fseCTable,
-                        const U32* additionalBits,
+                        const U8* additionalBits,
                         short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                         void* workspace, size_t wkspSize)
 {
@@ -474,7 +475,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
         /* I think there is an optimization opportunity here.
          * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
          * since it recalculates estimate from scratch.
-         * For example, it would recount literal distribution and symbol codes everytime.
+         * For example, it would recount literal distribution and symbol codes every time.
          */
         cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                        &nextCBlock->entropy, entropyMetadata,
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
@@ -219,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
 MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
     /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
      * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
-     * to align the beginning of the aligned secion.
+     * to align the beginning of the aligned section.
      *
      * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
      * aligneds being sized in multiples of 64 bytes.
@@ -422,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
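
ZSTD_ALIGNOF comes from the updated common/mem.h (+18 lines above) and expresses the alignment requirement directly instead of assuming it equals sizeof(void*). Its definition is approximately the following sketch, with a conservative fallback where no alignof operator exists:

/* Approximate shape only; see common/mem.h in v1.5.1 for the real definition. */
#ifndef ZSTD_ALIGNOF
# if defined(__GNUC__) || defined(_MSC_VER)
#   define ZSTD_ALIGNOF(T) __alignof(T)
# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#   include <stdalign.h>
#   define ZSTD_ALIGNOF(T) alignof(T)
# else
    /* No alignof available: estimate via the size, capped at pointer size. */
#   define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
# endif
#endif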