zstd-ruby 1.5.5.1 → 1.5.6.0

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/zstdruby/libzstd/common/allocations.h +1 -1
  4. data/ext/zstdruby/libzstd/common/bitstream.h +49 -29
  5. data/ext/zstdruby/libzstd/common/compiler.h +114 -22
  6. data/ext/zstdruby/libzstd/common/cpu.h +36 -0
  7. data/ext/zstdruby/libzstd/common/debug.c +6 -0
  8. data/ext/zstdruby/libzstd/common/debug.h +20 -11
  9. data/ext/zstdruby/libzstd/common/error_private.h +45 -36
  10. data/ext/zstdruby/libzstd/common/fse.h +3 -2
  11. data/ext/zstdruby/libzstd/common/fse_decompress.c +19 -17
  12. data/ext/zstdruby/libzstd/common/huf.h +14 -1
  13. data/ext/zstdruby/libzstd/common/mem.h +0 -9
  14. data/ext/zstdruby/libzstd/common/pool.c +1 -1
  15. data/ext/zstdruby/libzstd/common/pool.h +1 -1
  16. data/ext/zstdruby/libzstd/common/portability_macros.h +2 -0
  17. data/ext/zstdruby/libzstd/common/threading.c +8 -2
  18. data/ext/zstdruby/libzstd/common/xxhash.c +5 -11
  19. data/ext/zstdruby/libzstd/common/xxhash.h +2341 -1007
  20. data/ext/zstdruby/libzstd/common/zstd_internal.h +5 -5
  21. data/ext/zstdruby/libzstd/compress/fse_compress.c +8 -7
  22. data/ext/zstdruby/libzstd/compress/huf_compress.c +54 -25
  23. data/ext/zstdruby/libzstd/compress/zstd_compress.c +282 -161
  24. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +29 -27
  25. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +224 -113
  26. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +19 -13
  27. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +17 -5
  28. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +11 -0
  29. data/ext/zstdruby/libzstd/compress/zstd_fast.c +14 -6
  30. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +129 -87
  31. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +103 -28
  32. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +8 -2
  33. data/ext/zstdruby/libzstd/compress/zstd_opt.c +216 -112
  34. data/ext/zstdruby/libzstd/compress/zstd_opt.h +31 -7
  35. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +94 -79
  36. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +188 -126
  37. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +38 -19
  38. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +84 -32
  39. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +231 -208
  40. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +1 -1
  41. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +2 -0
  42. data/ext/zstdruby/libzstd/dictBuilder/cover.c +16 -12
  43. data/ext/zstdruby/libzstd/dictBuilder/cover.h +2 -8
  44. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
  45. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +12 -6
  46. data/ext/zstdruby/libzstd/zstd.h +129 -60
  47. data/lib/zstd-ruby/version.rb +1 -1
  48. metadata +1 -1
@@ -12,6 +12,11 @@
  #include "zstd_lazy.h"
  #include "../common/bits.h" /* ZSTD_countTrailingZeros64 */

+ #if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+
  #define kLazySkippingStep 8


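The new ZSTD_EXCLUDE_*_BLOCK_COMPRESSOR guards let a build compile out whole match-finder families it does not need, shrinking the resulting library. A minimal sketch of how a downstream build might opt in; the macro names come from this diff, while the build wiring shown is an assumption, not part of the gem:

/* Hypothetical build configuration, typically passed as -D flags, e.g.
 *   cc -DZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -c zstd_lazy.c */
#define ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR /* drop the binary-tree searcher */
#define ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR   /* drop the depth-2 lazy searcher */
/* With both defined, zstd_lazy.c keeps only the greedy and depth-1 lazy
 * ZSTD_compressBlock_* entry points. */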
@@ -19,8 +24,9 @@
  * Binary Tree search
  ***************************************/

- static void
- ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ static
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
  const BYTE* ip, const BYTE* iend,
  U32 mls)
  {
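ZSTD_ALLOW_POINTER_OVERFLOW_ATTR, newly applied to nearly every search function in this file, is defined in common/compiler.h (also updated in this release). A hedged sketch of what such an attribute typically expands to; the actual definition and its guards in compiler.h may differ:

/* Sketch only: exempt a function from UBSan's pointer-overflow check,
 * since the match finders intentionally perform wrapping pointer math. */
#if defined(__clang__) && __has_attribute(no_sanitize)
#  define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
#else
#  define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
#endif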
@@ -63,8 +69,9 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
  * sort one already inserted but unsorted position
  * assumption : curr >= btlow == (curr - btmask)
  * doesn't fail */
- static void
- ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+ static
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
  U32 curr, const BYTE* inputEnd,
  U32 nbCompares, U32 btLow,
  const ZSTD_dictMode_e dictMode)
@@ -152,8 +159,9 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
  }


- static size_t
- ZSTD_DUBT_findBetterDictMatch (
+ static
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_DUBT_findBetterDictMatch (
  const ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iend,
  size_t* offsetPtr,
@@ -230,8 +238,9 @@ ZSTD_DUBT_findBetterDictMatch (
  }


- static size_t
- ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ static
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iend,
  size_t* offBasePtr,
  U32 const mls,
@@ -381,8 +390,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,


  /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
- FORCE_INLINE_TEMPLATE size_t
- ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iLimit,
  size_t* offBasePtr,
  const U32 mls /* template */,
@@ -617,7 +627,9 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb

  /* Update chains up to ip (excluded)
     Assumption : always within prefix (i.e. not within extDict) */
- FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ U32 ZSTD_insertAndFindFirstIndex_internal(
  ZSTD_matchState_t* ms,
  const ZSTD_compressionParameters* const cParams,
  const BYTE* ip, U32 const mls, U32 const lazySkipping)
@@ -651,6 +663,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {

  /* inlining is important to hardwire a hot branch (template emulation) */
  FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  size_t ZSTD_HcFindBestMatch(
  ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iLimit,
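The comment above names the trick: FORCE_INLINE_TEMPLATE is C's emulation of C++ templates. The generic searcher takes its strategy parameters (mls, dictMode, search type) as arguments, and each public wrapper pins them to literals, so forced inlining constant-folds the hot branches away. A reduced, self-contained sketch of the idiom, with illustrative names only:

#include <stddef.h>

#define MY_FORCE_INLINE static inline __attribute__((always_inline))

/* 'mode' is a compile-time constant at every call site below. */
MY_FORCE_INLINE size_t search_generic(const char* p, int mode)
{
    if (mode == 0) return (size_t)p[0];   /* branch folds away when inlined */
    return (size_t)p[0] + 1u;
}

/* Each wrapper hardwires one specialization, like the ZSTD_compressBlock_* family. */
size_t search_mode0(const char* p) { return search_generic(p, 0); }
size_t search_mode1(const char* p) { return search_generic(p, 1); }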
@@ -819,7 +832,9 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* t
  * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
  * but not beyond iLimit.
  */
- FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
  U32 const rowLog, U32 const mls,
  U32 idx, const BYTE* const iLimit)
  {
@@ -845,7 +860,9 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
  * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
  * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
  */
- FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
  BYTE const* tagTable, BYTE const* base,
  U32 idx, U32 const hashLog,
  U32 const rowLog, U32 const mls,
@@ -863,10 +880,12 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab
  /* ZSTD_row_update_internalImpl():
  * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
  */
- FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
- U32 updateStartIdx, U32 const updateEndIdx,
- U32 const mls, U32 const rowLog,
- U32 const rowMask, U32 const useCache)
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
  {
  U32* const hashTable = ms->hashTable;
  BYTE* const tagTable = ms->tagTable;
@@ -892,9 +911,11 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
  * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
  * Skips sections of long matches as is necessary.
  */
- FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
- U32 const mls, U32 const rowLog,
- U32 const rowMask, U32 const useCache)
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
  {
  U32 idx = ms->nextToUpdate;
  const BYTE* const base = ms->window.base;
@@ -1102,20 +1123,21 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr

  /* The high-level approach of the SIMD row based match finder is as follows:
  * - Figure out where to insert the new entry:
- * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
- * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
+ * - Generate a hash for the current input position and split it into one byte of tag and `rowHashLog` bits of index.
+ * - The hash is salted by a value that changes on every context reset, so when the same table is used
+ *   we will avoid collisions that would otherwise slow us down by introducing phantom matches.
+ * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
  * which row to insert into.
- * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
- * be considered as a circular buffer with a "head" index that resides in the tagTable.
- * - Also insert the "tag" into the equivalent row and position in the tagTable.
- * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
- * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
- * for alignment/performance reasons, leaving some bytes unused.
- * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
+ * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
+ * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes
+ * per row).
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and
  * generate a bitfield that we can cycle through to check the collisions in the hash table.
  * - Pick the longest match.
+ * - Insert the tag into the equivalent row and position in the tagTable.
  */
  FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  size_t ZSTD_RowFindBestMatch(
  ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iLimit,
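The rewritten comment above describes hashing each position into a 1-byte tag plus rowHashLog bits of row index, salted per context reset so a reused table does not yield phantom matches. A hedged sketch of that split; all names here are stand-ins, not the actual zstd internals:

#include <stdint.h>
typedef uint32_t U32;

/* Illustrative only: derive the row index and tag from a salted hash. */
static void splitRowHash(U32 hash, U32 salt, U32 rowHashLog,
                         U32* tag, U32* row)
{
    U32 const salted = hash ^ salt;                  /* salt changes on context reset */
    *tag = salted & 0xFF;                            /* low byte: the 1-byte tag */
    *row = (salted >> 8) & ((1u << rowHashLog) - 1); /* which row to insert into */
}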
@@ -1489,8 +1511,9 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
  * Common parser - lazy strategy
  *********************************/

- FORCE_INLINE_TEMPLATE size_t
- ZSTD_compressBlock_lazy_generic(
+ FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_compressBlock_lazy_generic(
  ZSTD_matchState_t* ms, seqStore_t* seqStore,
  U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize,
@@ -1754,152 +1777,163 @@ _storeSequence:
  /* Return the last literals size */
  return (size_t)(iend - anchor);
  }
+ #endif /* build exclusions */


- size_t ZSTD_compressBlock_btlazy2(
+ #ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_greedy(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_lazy2(
+ size_t ZSTD_compressBlock_greedy_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
  }

- size_t ZSTD_compressBlock_lazy(
+ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
  }

- size_t ZSTD_compressBlock_greedy(
+ size_t ZSTD_compressBlock_greedy_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ size_t ZSTD_compressBlock_greedy_dictMatchState_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
  }

- size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
  }
+ #endif

- size_t ZSTD_compressBlock_lazy_dictMatchState(
+ #ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_lazy(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_greedy_dictMatchState(
+ size_t ZSTD_compressBlock_lazy_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
  }

-
- size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
  }

- size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ size_t ZSTD_compressBlock_lazy_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ size_t ZSTD_compressBlock_lazy_dictMatchState_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
  }

- /* Row-based matchfinder */
- size_t ZSTD_compressBlock_lazy2_row(
+ size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
  }
+ #endif

- size_t ZSTD_compressBlock_lazy_row(
+ #ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_lazy2(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_greedy_row(
+ size_t ZSTD_compressBlock_lazy2_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
  }

- size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
  }

- size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ size_t ZSTD_compressBlock_lazy2_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
  }

-
  size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
  return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
  }
+ #endif

- size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ #ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_btlazy2(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
  }

- size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ size_t ZSTD_compressBlock_btlazy2_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
  {
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
  }
+ #endif

+ #if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
  FORCE_INLINE_TEMPLATE
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  size_t ZSTD_compressBlock_lazy_extDict_generic(
  ZSTD_matchState_t* ms, seqStore_t* seqStore,
  U32 rep[ZSTD_REP_NUM],
@@ -2101,8 +2135,9 @@ _storeSequence:
  /* Return the last literals size */
  return (size_t)(iend - anchor);
  }
+ #endif /* build exclusions */

-
+ #ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
  size_t ZSTD_compressBlock_greedy_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
@@ -2110,48 +2145,55 @@ size_t ZSTD_compressBlock_greedy_extDict(
  return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
  }

- size_t ZSTD_compressBlock_lazy_extDict(
+ size_t ZSTD_compressBlock_greedy_extDict_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
-
  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
  }
+ #endif

- size_t ZSTD_compressBlock_lazy2_extDict(
+ #ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_lazy_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)

  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
  }

- size_t ZSTD_compressBlock_btlazy2_extDict(
+ size_t ZSTD_compressBlock_lazy_extDict_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)

  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
  }
+ #endif

- size_t ZSTD_compressBlock_greedy_extDict_row(
+ #ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_lazy2_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
+
  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
  }

- size_t ZSTD_compressBlock_lazy_extDict_row(
+ size_t ZSTD_compressBlock_lazy2_extDict_row(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
-
  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
  }
+ #endif

- size_t ZSTD_compressBlock_lazy2_extDict_row(
+ #ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_btlazy2_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
+
  {
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
  }
+ #endif