zstdlib 0.6.0-x64-mingw32 → 0.9.0-x64-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +20 -0
  3. data/README.md +7 -1
  4. data/Rakefile +38 -8
  5. data/ext/{zstdlib → zstdlib_c}/extconf.rb +10 -5
  6. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.2/zstdlib.c +2 -2
  7. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.3/zstdlib.c +2 -2
  8. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.4/zstdlib.c +2 -2
  9. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.5/zstdlib.c +2 -2
  10. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.6/zstdlib.c +2 -2
  11. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.7/zstdlib.c +2 -2
  12. data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
  13. data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
  14. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/adler32.c +0 -0
  15. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/compress.c +0 -0
  16. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.c +0 -0
  17. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.h +0 -0
  18. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.c +0 -0
  19. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.h +0 -0
  20. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzclose.c +0 -0
  21. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzguts.h +0 -0
  22. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzlib.c +0 -0
  23. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzread.c +0 -0
  24. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzwrite.c +0 -0
  25. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/infback.c +0 -0
  26. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.c +0 -0
  27. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.h +0 -0
  28. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffixed.h +0 -0
  29. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.c +0 -0
  30. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.h +0 -0
  31. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.c +0 -0
  32. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.h +0 -0
  33. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.c +0 -0
  34. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.h +0 -0
  35. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/uncompr.c +0 -0
  36. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zconf.h +0 -0
  37. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zlib.h +0 -0
  38. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.c +0 -0
  39. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.h +0 -0
  40. data/ext/{zstdlib → zstdlib_c}/zlib.mk +0 -0
  41. data/ext/{zstdlib → zstdlib_c}/zlibwrapper/zlibwrapper.c +1 -5
  42. data/ext/{zstdlib → zstdlib_c}/zlibwrapper.mk +0 -0
  43. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/bitstream.h +75 -57
  44. data/ext/zstdlib_c/zstd-1.5.2/lib/common/compiler.h +335 -0
  45. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/cpu.h +1 -3
  46. data/ext/zstdlib_c/zstd-1.5.2/lib/common/debug.c +24 -0
  47. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/debug.h +22 -49
  48. data/ext/zstdlib_c/zstd-1.5.2/lib/common/entropy_common.c +368 -0
  49. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.c +3 -1
  50. data/ext/zstdlib_c/zstd-1.5.2/lib/common/error_private.h +159 -0
  51. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/fse.h +51 -42
  52. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/fse_decompress.c +149 -57
  53. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/huf.h +60 -54
  54. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/mem.h +87 -98
  55. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/pool.c +34 -23
  56. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/pool.h +5 -5
  57. data/ext/zstdlib_c/zstd-1.5.2/lib/common/portability_macros.h +137 -0
  58. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/threading.c +10 -8
  59. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/threading.h +4 -3
  60. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.c +24 -0
  61. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.h +5686 -0
  62. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_common.c +10 -10
  63. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_deps.h +111 -0
  64. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_internal.h +493 -0
  65. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_trace.h +163 -0
  66. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/clevels.h +134 -0
  67. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/fse_compress.c +105 -85
  68. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.c +41 -63
  69. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.h +13 -33
  70. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/huf_compress.c +1370 -0
  71. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress.c +6327 -0
  72. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_internal.h +537 -82
  73. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.c +21 -16
  74. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.h +4 -2
  75. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.c +61 -34
  76. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.h +10 -3
  77. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_superblock.c +573 -0
  78. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_superblock.h +32 -0
  79. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h +236 -95
  80. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.c +321 -143
  81. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.h +2 -2
  82. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.c +328 -137
  83. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.h +2 -2
  84. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.c +2104 -0
  85. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.h +125 -0
  86. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.c +336 -209
  87. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.h +15 -3
  88. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_ldm_geartab.h +106 -0
  89. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.c +439 -239
  90. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.h +1 -1
  91. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c +205 -462
  92. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstdmt_compress.h +113 -0
  93. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress.c +1889 -0
  94. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress_amd64.S +585 -0
  95. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.c +20 -16
  96. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.h +3 -3
  97. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress.c +691 -230
  98. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/zstd_decompress_block.c +2072 -0
  99. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.h +16 -7
  100. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_internal.h +71 -10
  101. data/ext/zstdlib_c/zstd-1.5.2/lib/zdict.h +452 -0
  102. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/zstd.h +760 -234
  103. data/ext/{zstdlib/zstd-1.4.4/lib/common → zstdlib_c/zstd-1.5.2/lib}/zstd_errors.h +3 -1
  104. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzclose.c +0 -0
  105. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzcompatibility.h +1 -1
  106. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzguts.h +0 -0
  107. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzlib.c +0 -0
  108. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzread.c +0 -0
  109. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzwrite.c +0 -0
  110. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.c +133 -44
  111. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.h +1 -1
  112. data/ext/zstdlib_c/zstd.mk +15 -0
  113. data/lib/2.4/zstdlib_c.so +0 -0
  114. data/lib/2.5/zstdlib_c.so +0 -0
  115. data/lib/2.6/zstdlib_c.so +0 -0
  116. data/lib/2.7/zstdlib_c.so +0 -0
  117. data/lib/3.0/zstdlib_c.so +0 -0
  118. data/lib/zstdlib.rb +2 -2
  119. metadata +124 -114
  120. data/ext/zstdlib/zstd-1.4.4/lib/common/compiler.h +0 -159
  121. data/ext/zstdlib/zstd-1.4.4/lib/common/debug.c +0 -44
  122. data/ext/zstdlib/zstd-1.4.4/lib/common/entropy_common.c +0 -236
  123. data/ext/zstdlib/zstd-1.4.4/lib/common/error_private.h +0 -76
  124. data/ext/zstdlib/zstd-1.4.4/lib/common/xxhash.c +0 -882
  125. data/ext/zstdlib/zstd-1.4.4/lib/common/xxhash.h +0 -305
  126. data/ext/zstdlib/zstd-1.4.4/lib/common/zstd_internal.h +0 -350
  127. data/ext/zstdlib/zstd-1.4.4/lib/compress/huf_compress.c +0 -798
  128. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_compress.c +0 -4103
  129. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_lazy.c +0 -1115
  130. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_lazy.h +0 -67
  131. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstdmt_compress.h +0 -192
  132. data/ext/zstdlib/zstd-1.4.4/lib/decompress/huf_decompress.c +0 -1234
  133. data/ext/zstdlib/zstd-1.4.4/lib/decompress/zstd_decompress_block.c +0 -1323
  134. data/ext/zstdlib/zstd.mk +0 -14
  135. data/lib/2.2/zstdlib.so +0 -0
  136. data/lib/2.3/zstdlib.so +0 -0
  137. data/lib/2.4/zstdlib.so +0 -0
  138. data/lib/2.5/zstdlib.so +0 -0
  139. data/lib/2.6/zstdlib.so +0 -0
  140. data/lib/2.7/zstdlib.so +0 -0
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,6 @@
 
 
 #define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
-#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
 #define ZSTD_MAX_PRICE      (1<<30)
 
 #define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
@@ -24,11 +23,11 @@
 *  Price functions for optimal parser
 ***************************************/
 
-#if 0    /* approximation at bit level */
+#if 0    /* approximation at bit level (for tests) */
 #  define BITCOST_ACCURACY 0
 #  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
-#elif 0  /* fractional bit accuracy */
+#  define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0  /* fractional bit accuracy (for tests) */
 #  define BITCOST_ACCURACY 8
 #  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
 #  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
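Context for readers: the branches shown above are test-only; the enabled #else branch (outside this hunk) keeps BITCOST_ACCURACY at 8, so the optimal parser compares prices in fixed-point "fractional bits" without floating point. A minimal standalone sketch of that idea, with simplified stand-ins for ZSTD_fracWeight() (names and the demo values are illustrative, not zstd internals):

#include <stdio.h>
#include <stdint.h>

#define BITCOST_ACCURACY   8
#define BITCOST_MULTIPLIER (1u << BITCOST_ACCURACY)

static unsigned highbit32(uint32_t v) { unsigned n = 0; while (v >>= 1) n++; return n; }

/* ~log2(stat) in fixed point: integer bits plus a linear fractional part */
static uint32_t fracWeight(uint32_t rawStat)
{
    uint32_t const stat = rawStat + 1;
    uint32_t const hb = highbit32(stat);
    uint32_t const intBits  = hb * BITCOST_MULTIPLIER;
    uint32_t const fracBits = (stat << BITCOST_ACCURACY) >> hb;  /* in [256,512) */
    return intBits + fracBits;
}

int main(void)
{
    /* cost(symbol) ~ weight(total) - weight(freq), in 1/256ths of a bit;
     * 64 occurrences out of 2048 should cost ~log2(2048/64) = 5 bits */
    uint32_t totalW = fracWeight(2048), symW = fracWeight(64);
    printf("~%.2f bits\n", (double)(totalW - symW) / BITCOST_MULTIPLIER);
    return 0;
}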
@@ -66,7 +65,7 @@ MEM_STATIC double ZSTD_fCost(U32 price)
 
 static int ZSTD_compressedLiterals(optState_t const* const optPtr)
 {
-    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+    return optPtr->literalCompressionMode != ZSTD_ps_disable;
 }
 
 static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
@@ -79,25 +78,46 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
 }
 
 
-/* ZSTD_downscaleStat() :
- * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
- * return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+static U32 sum_u32(const unsigned table[], size_t nbElts)
+{
+    size_t n;
+    U32 total = 0;
+    for (n=0; n<nbElts; n++) {
+        total += table[n];
+    }
+    return total;
+}
+
+static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
 {
     U32 s, sum=0;
-    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
-    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+    DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
+    assert(shift < 30);
     for (s=0; s<lastEltIndex+1; s++) {
-        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+        table[s] = 1 + (table[s] >> shift);
         sum += table[s];
     }
     return sum;
 }
 
+/* ZSTD_scaleStats() :
+ * reduce all elements in table is sum too large
+ * return the resulting sum of elements */
+static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+{
+    U32 const prevsum = sum_u32(table, lastEltIndex+1);
+    U32 const factor = prevsum >> logTarget;
+    DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
+    assert(logTarget < 30);
+    if (factor <= 1) return prevsum;
+    return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
+}
+
 /* ZSTD_rescaleFreqs() :
  * if first block (detected by optPtr->litLengthSum == 0) : init statistics
  * take hints from dictionary if there is one
- * or init from zero, using src for literals stats, or flat 1 for match symbols
+ * and init from zero if there is none,
+ * using src for literals stats, and baseline stats for sequence symbols
  * otherwise downscale existing stats, to be used as seed for next block.
  */
 static void
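The new ZSTD_scaleStats() replaces the fixed ZSTD_FREQ_DIV shift: a table is now left untouched while its sum stays at or below 2^logTarget, and otherwise shifted just enough to land in [2^logTarget, 2^(logTarget+1)). A standalone sketch of that behavior, re-implementing the two helpers from the hunk above outside zstd (illustrative only):

#include <stdio.h>

static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

static unsigned sum_u32(const unsigned* t, unsigned n)
{ unsigned i, s = 0; for (i = 0; i < n; i++) s += t[i]; return s; }

static unsigned downscaleStats(unsigned* t, unsigned lastIdx, unsigned shift)
{
    unsigned s, sum = 0;
    for (s = 0; s < lastIdx + 1; s++) { t[s] = 1 + (t[s] >> shift); sum += t[s]; }
    return sum;
}

static unsigned scaleStats(unsigned* t, unsigned lastIdx, unsigned logTarget)
{
    unsigned const prevsum = sum_u32(t, lastIdx + 1);
    unsigned const factor = prevsum >> logTarget;
    if (factor <= 1) return prevsum;              /* already small enough: no rescale */
    return downscaleStats(t, lastIdx, highbit32(factor));
}

int main(void)
{
    unsigned freq[4] = { 90000, 9000, 900, 100 };   /* sum = 100000 */
    unsigned sum = scaleStats(freq, 3, 11);         /* shift by log2(100000>>11)=5 */
    printf("sum=%u (freqs %u %u %u %u)\n", sum, freq[0], freq[1], freq[2], freq[3]);
    return 0;   /* prints sum=3128, i.e. within [2^11, 2^12) of the 2K target */
}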
@@ -126,7 +146,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
             optPtr->litSum = 0;
             for (lit=0; lit<=MaxLit; lit++) {
                 U32 const scaleLog = 11;   /* scale to 2K */
-                U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+                U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
                 assert(bitCost <= scaleLog);
                 optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                 optPtr->litSum += optPtr->litFreq[lit];
@@ -174,14 +194,19 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
         if (compressedLiterals) {
             unsigned lit = MaxLit;
             HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
-            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+            optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
         }
 
-        {   unsigned ll;
-            for (ll=0; ll<=MaxLL; ll++)
-                optPtr->litLengthFreq[ll] = 1;
+        {   unsigned const baseLLfreqs[MaxLL+1] = {
+                4, 2, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1
+            };
+            ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
+            optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
         }
-        optPtr->litLengthSum = MaxLL+1;
 
         {   unsigned ml;
             for (ml=0; ml<=MaxML; ml++)
@@ -189,21 +214,26 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
         }
         optPtr->matchLengthSum = MaxML+1;
 
-        {   unsigned of;
-            for (of=0; of<=MaxOff; of++)
-                optPtr->offCodeFreq[of] = 1;
+        {   unsigned const baseOFCfreqs[MaxOff+1] = {
+                6, 2, 1, 1, 2, 3, 4, 4,
+                4, 3, 2, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1
+            };
+            ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
+            optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
         }
-        optPtr->offCodeSum = MaxOff+1;
+
 
     }
 
     } else {   /* new block : re-use previous statistics, scaled down */
 
         if (compressedLiterals)
-            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
-        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
-        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
-        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+            optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
+        optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
+        optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
+        optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
     }
 
     ZSTD_setBasePrices(optPtr, optLevel);
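Note the init change above: first-block litLength and offset-code statistics are now seeded with the small non-flat priors (baseLLfreqs, baseOFCfreqs) rather than all-ones, which makes litLength 0 and short offsets start out cheaper. A quick illustrative check of what the litLength prior implies, using plain log2 arithmetic rather than zstd's fixed-point weights:

#include <stdio.h>
#include <math.h>

int main(void)
{
    double const flatSum = 36.0;                /* old init: MaxLL+1 ones */
    double const baseSum = 4 + 2 + 34 * 1.0;    /* baseLLfreqs {4,2,1,...,1}: sum 40 */
    double flatCost = log2(flatSum / 1.0);      /* every LL code ~5.17 bits */
    double baseCost = log2(baseSum / 4.0);      /* LL code 0 now ~3.32 bits */
    printf("flat init: %.2f bits, baseline init: %.2f bits for LL code 0\n",
           flatCost, baseCost);
    return 0;
}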
@@ -239,7 +269,16 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
  * cost of literalLength symbol */
 static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+    assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+    if (optPtr->priceType == zop_predef)
+        return WEIGHT(litLength, optLevel);
+    /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+     * because it isn't representable in the zstd format. So instead just
+     * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
+     * would be all literals.
+     */
+    if (litLength == ZSTD_BLOCKSIZE_MAX)
+        return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
 
     /* dynamic statistics */
     {   U32 const llCode = ZSTD_LLcode(litLength);
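The boundary rule added above prices a block-sized literal run as one bit more than the largest representable run, since a 128 KB litLength cannot be encoded as a single sequence. A compilable toy version of just that rule (dummyPrice() is a stand-in, not a zstd function):

#include <stdio.h>

enum { BLOCKSIZE_MAX = 1 << 17, BITCOST_MULT = 1 << 8 };

/* stand-in pricer: pretend every literal-length code costs 10 bits */
static unsigned dummyPrice(unsigned litLength) { (void)litLength; return 10 * BITCOST_MULT; }

static unsigned litLengthPriceCapped(unsigned litLength)
{
    if (litLength == BLOCKSIZE_MAX)   /* not representable as one sequence */
        return BITCOST_MULT + dummyPrice(BLOCKSIZE_MAX - 1);   /* "one bit more" */
    return dummyPrice(litLength);
}

int main(void)
{
    printf("%u vs %u\n", litLengthPriceCapped(BLOCKSIZE_MAX - 1),
                         litLengthPriceCapped(BLOCKSIZE_MAX));
    return 0;
}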
@@ -249,52 +288,20 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
     }
 }
 
-/* ZSTD_litLengthContribution() :
- * @return ( cost(litlength) - cost(0) )
- * this value can then be added to rawLiteralsCost()
- * to provide a cost which is directly comparable to a match ending at same position */
-static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
-{
-    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);
-
-    /* dynamic statistics */
-    {   U32 const llCode = ZSTD_LLcode(litLength);
-        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
-                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
-                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
-#if 1
-        return contribution;
-#else
-        return MAX(0, contribution); /* sometimes better, sometimes not ... */
-#endif
-    }
-}
-
-/* ZSTD_literalsContribution() :
- * creates a fake cost for the literals part of a sequence
- * which can be compared to the ending cost of a match
- * should a new match start at this position */
-static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
-                                     const optState_t* const optPtr,
-                                     int optLevel)
-{
-    int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
-                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
-    return contribution;
-}
-
 /* ZSTD_getMatchPrice() :
  * Provides the cost of the match part (offset + matchLength) of a sequence
  * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
- * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+ * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
+ * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
+ */
 FORCE_INLINE_TEMPLATE U32
-ZSTD_getMatchPrice(U32 const offset,
+ZSTD_getMatchPrice(U32 const offcode,
                    U32 const matchLength,
                    const optState_t* const optPtr,
                    int const optLevel)
 {
     U32 price;
-    U32 const offCode = ZSTD_highbit32(offset+1);
+    U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
     U32 const mlBase = matchLength - MINMATCH;
     assert(matchLength >= MINMATCH);
 
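Many hunks below switch matches[].off from the old ad-hoc encoding (repCode - ll0, or offset + ZSTD_REP_MOVE) to the STORE_* macros named in the new @offcode comment. Their effect, restated as plain C stand-ins (paraphrased from zstd_compress_internal.h in this release; the definitions there are authoritative):

#define REP_MOVE 2                              /* ZSTD_REP_NUM - 1 */
#define STORE_REPCODE(r)      ((r) - 1)         /* repcode 1..3   -> stored 0..2   */
#define STORE_OFFSET(o)       ((o) + REP_MOVE)  /* real offset o  -> stored o + 2  */
#define STORED_TO_OFFBASE(o)  ((o) + 1)         /* stored value   -> offBase       */

/* The pricing code's offCode is therefore unchanged in value:
 *   old: ZSTD_highbit32(offset + 1)            with offset = stored value
 *   new: ZSTD_highbit32(STORED_TO_OFFBASE(offcode))
 * Both map repcodes to offBase 1..3 and real offsets to offBase >= 4;
 * the rename just makes the scale explicit at every call site. */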
@@ -337,8 +344,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
         optPtr->litLengthSum++;
     }
 
-    /* match offset code (0-2=>repCode; 3+=>offset+2) */
-    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
+    /* offset code : expected to follow storeSeq() numeric representation */
+    {   U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
         assert(offCode <= MaxOff);
         optPtr->offCodeFreq[offCode]++;
         optPtr->offCodeSum++;
@@ -372,7 +379,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
 
 /* Update hashTable3 up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
+static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
                                               U32* nextToUpdate3,
                                               const BYTE* const ip)
 {
@@ -398,11 +405,13 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
 *  Binary Tree search
 ***************************************/
 /** ZSTD_insertBt1() : add one or multiple positions to tree.
- *  ip : assumed <= iend-8 .
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
 * @return : nb of positions added */
 static U32 ZSTD_insertBt1(
-                ZSTD_matchState_t* ms,
+                const ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
+                U32 const target,
                 U32 const mls, const int extDict)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -420,32 +429,36 @@ static U32 ZSTD_insertBt1(
     const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
     const BYTE* match;
-    const U32 current = (U32)(ip-base);
-    const U32 btLow = btMask >= current ? 0 : current - btMask;
-    U32* smallerPtr = bt + 2*(current&btMask);
+    const U32 curr = (U32)(ip-base);
+    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+    U32* smallerPtr = bt + 2*(curr&btMask);
     U32* largerPtr  = smallerPtr + 1;
     U32 dummy32;   /* to be nullified at the end */
-    U32 const windowLow = ms->window.lowLimit;
-    U32 matchEndIdx = current+8+1;
+    /* windowLow is based on target because
+     * we only need positions that will be in the window at the end of the tree update.
+     */
+    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
+    U32 matchEndIdx = curr+8+1;
     size_t bestLength = 8;
     U32 nbCompares = 1U << cParams->searchLog;
 #ifdef ZSTD_C_PREDICT
-    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
-    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
+    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
     predictedSmall += (predictedSmall>0);
     predictedLarge += (predictedLarge>0);
 #endif /* ZSTD_C_PREDICT */
 
-    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
+    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
 
+    assert(curr <= target);
     assert(ip <= iend-8);   /* required for h calculation */
-    hashTable[h] = current;   /* Update Hash Table */
+    hashTable[h] = curr;   /* Update Hash Table */
 
     assert(windowLow > 0);
-    while (nbCompares-- && (matchIndex >= windowLow)) {
+    for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        assert(matchIndex < current);
+        assert(matchIndex < curr);
 
 #ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
         const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
@@ -508,8 +521,8 @@ static U32 ZSTD_insertBt1(
     *smallerPtr = *largerPtr = 0;
     {   U32 positions = 0;
         if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
-        assert(matchEndIdx > current + 8);
-        return MAX(positions, matchEndIdx - (current + 8));
+        assert(matchEndIdx > curr + 8);
+        return MAX(positions, matchEndIdx - (curr + 8));
     }
 }
 
@@ -526,7 +539,7 @@ void ZSTD_updateTree_internal(
                 idx, target, dictMode);
 
     while(idx < target) {
-        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
         assert(idx < (U32)(idx + forward));
         idx += forward;
     }
@@ -553,7 +566,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     const BYTE* const base = ms->window.base;
-    U32 const current = (U32)(ip-base);
+    U32 const curr = (U32)(ip-base);
     U32 const hashLog = cParams->hashLog;
     U32 const minMatch = (mls==3) ? 3 : 4;
     U32* const hashTable = ms->hashTable;
@@ -567,12 +580,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
     U32 const dictLimit = ms->window.dictLimit;
     const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
-    U32 const btLow = (btMask >= current) ? 0 : current - btMask;
-    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
+    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
     U32 const matchLow = windowLow ? windowLow : 1;
-    U32* smallerPtr = bt + 2*(current&btMask);
-    U32* largerPtr  = bt + 2*(current&btMask) + 1;
-    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
+    U32* smallerPtr = bt + 2*(curr&btMask);
+    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
+    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
     U32 dummy32;   /* to be nullified at the end */
     U32 mnum = 0;
     U32 nbCompares = 1U << cParams->searchLog;
@@ -591,7 +604,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
     U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
 
     size_t bestLength = lengthToBeat-1;
-    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
+    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
 
     /* check repCode */
     assert(ll0 <= 1);   /* necessarily 1 or 0 */
@@ -599,26 +612,29 @@ U32 ZSTD_insertBtAndGetAllMatches (
         U32 repCode;
         for (repCode = ll0; repCode < lastR; repCode++) {
             U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            U32 const repIndex = current - repOffset;
+            U32 const repIndex = curr - repOffset;
             U32 repLen = 0;
-            assert(current >= dictLimit);
-            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {  /* equivalent to `current > repIndex >= dictLimit` */
-                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
+            assert(curr >= dictLimit);
+            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
+                /* We must validate the repcode offset because when we're using a dictionary the
+                 * valid offset range shrinks when the dictionary goes out of bounds.
+                 */
+                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
                     repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                 }
-            } else {  /* repIndex < dictLimit || repIndex >= current */
+            } else {  /* repIndex < dictLimit || repIndex >= curr */
                 const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
                                              dmsBase + repIndex - dmsIndexDelta :
                                              dictBase + repIndex;
-                assert(current >= windowLow);
+                assert(curr >= windowLow);
                 if ( dictMode == ZSTD_extDict
-                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
+                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
                      & (((U32)((dictLimit-1) - repIndex) >= 3) )   /* intentional overflow : do not test positions overlapping 2 memory segments */
                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                     repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
                 }
                 if (dictMode == ZSTD_dictMatchState
-                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `current > repIndex >= dmsLowLimit` */
+                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
                      & ((U32)((dictLimit-1) - repIndex) >= 3) )   /* intentional overflow : do not test positions overlapping 2 memory segments */
                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                     repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
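The repcode validation above leans on a deliberate unsigned-overflow idiom, and the new `repIndex >= windowLow` test tightens it for the case where a dictionary slides out of the window. The idiom itself, spelled out and self-checked in a standalone snippet (values are made up for the demo):

#include <assert.h>

/* `repOffset - 1 < curr - dictLimit` is a single unsigned compare that
 * checks `dictLimit <= curr - repOffset < curr`: repOffset == 0 wraps to
 * UINT_MAX and always fails, and repOffset > curr - dictLimit fails too. */
static int inPrefix(unsigned curr, unsigned dictLimit, unsigned repOffset)
{
    return (repOffset - 1) < (curr - dictLimit);   /* intentional overflow */
}

int main(void)
{
    assert( inPrefix(100, 40, 1));    /* repIndex = 99  >= 40 : valid    */
    assert( inPrefix(100, 40, 60));   /* repIndex = 40  >= 40 : valid    */
    assert(!inPrefix(100, 40, 61));   /* repIndex = 39  <  40 : invalid  */
    assert(!inPrefix(100, 40, 0));    /* offset 0 discarded by the wrap  */
    return 0;
}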
@@ -628,7 +644,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                             repCode, ll0, repOffset, repLen);
                 bestLength = repLen;
-                matches[mnum].off = repCode - ll0;
+                matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1);  /* expect value between 1 and 3 */
                 matches[mnum].len = (U32)repLen;
                 mnum++;
                 if ( (repLen > sufficient_len)
@@ -640,7 +656,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
     if ((mls == 3) /*static*/ && (bestLength < mls)) {
         U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
         if ((matchIndex3 >= matchLow)
-          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
             size_t mlen;
             if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                 const BYTE* const match = base + matchIndex3;
@@ -655,26 +671,26 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 DEBUGLOG(8, "found small match with hlog3, of length %u",
                             (U32)mlen);
                 bestLength = mlen;
-                assert(current > matchIndex3);
+                assert(curr > matchIndex3);
                 assert(mnum==0);  /* no prior solution */
-                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
+                matches[0].off = STORE_OFFSET(curr - matchIndex3);
                 matches[0].len = (U32)mlen;
                 mnum = 1;
                 if ( (mlen > sufficient_len) |
                      (ip+mlen == iLimit) ) {  /* best possible length */
-                    ms->nextToUpdate = current+1;  /* skip insertion */
+                    ms->nextToUpdate = curr+1;  /* skip insertion */
                     return 1;
         }   }   }
         /* no dictMatchState lookup: dicts don't have a populated HC3 table */
-    }
+    }  /* if (mls == 3) */
 
-    hashTable[h] = current;   /* Update Hash Table */
+    hashTable[h] = curr;   /* Update Hash Table */
 
-    while (nbCompares-- && (matchIndex >= matchLow)) {
+    for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         const BYTE* match;
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        assert(current > matchIndex);
+        assert(curr > matchIndex);
 
         if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
             assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
@@ -691,20 +707,19 @@ U32 ZSTD_insertBtAndGetAllMatches (
 
         if (matchLength > bestLength) {
             DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
-                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+                    (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
             assert(matchEndIdx > matchIndex);
             if (matchLength > matchEndIdx - matchIndex)
                 matchEndIdx = matchIndex + (U32)matchLength;
             bestLength = matchLength;
-            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+            matches[mnum].off = STORE_OFFSET(curr - matchIndex);
             matches[mnum].len = (U32)matchLength;
             mnum++;
             if ( (matchLength > ZSTD_OPT_NUM)
               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                 if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
                 break; /* drop, to preserve bt consistency (miss a little bit of compression) */
-            }
-        }
+        }   }
 
         if (match[matchLength] < ip[matchLength]) {
             /* match smaller than current */
@@ -723,12 +738,13 @@ U32 ZSTD_insertBtAndGetAllMatches (
 
     *smallerPtr = *largerPtr = 0;
 
+    assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
     if (dictMode == ZSTD_dictMatchState && nbCompares) {
        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
        U32 dictMatchIndex = dms->hashTable[dmsH];
        const U32* const dmsBt = dms->chainTable;
        commonLengthSmaller = commonLengthLarger = 0;
-        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+        for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match = dmsBase + dictMatchIndex;
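All three search loops in this file change from `while (nbCompares-- && ...)` to `for (; nbCompares && ...; --nbCompares)`, and the new assert above guards the reason: the post-decrement form wraps the counter to UINT_MAX when it exits at zero, and nbCompares is reused as the budget for the dictMatchState search that follows. Minimal reproduction of the difference:

#include <stdio.h>

int main(void)
{
    unsigned n = 2;
    while (n--) { /* body */ }      /* exits with n == UINT_MAX (wrapped) */
    printf("after while: %u\n", n);

    unsigned m = 2;
    for (; m; --m) { /* body */ }   /* exits with m == 0 */
    printf("after for:   %u\n", m);
    return 0;
}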
@@ -739,18 +755,17 @@ U32 ZSTD_insertBtAndGetAllMatches (
             if (matchLength > bestLength) {
                 matchIndex = dictMatchIndex + dmsIndexDelta;
                 DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
-                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+                        (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
                 if (matchLength > matchEndIdx - matchIndex)
                     matchEndIdx = matchIndex + (U32)matchLength;
                 bestLength = matchLength;
-                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+                matches[mnum].off = STORE_OFFSET(curr - matchIndex);
                 matches[mnum].len = (U32)matchLength;
                 mnum++;
                 if ( (matchLength > ZSTD_OPT_NUM)
                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                     break;   /* drop, to guarantee consistency (miss a little bit of compression) */
-                }
-            }
+            }   }
 
             if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
             if (match[matchLength] < ip[matchLength]) {
@@ -760,71 +775,242 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 /* match is larger than current */
                 commonLengthLarger = matchLength;
                 dictMatchIndex = nextPtr[0];
-            }
-        }
-    }
+    }   }   }  /* if (dictMode == ZSTD_dictMatchState) */
 
-    assert(matchEndIdx > current+8);
+    assert(matchEndIdx > curr+8);
     ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
     return mnum;
 }
 
+typedef U32 (*ZSTD_getAllMatchesFn)(
+    ZSTD_match_t*,
+    ZSTD_matchState_t*,
+    U32*,
+    const BYTE*,
+    const BYTE*,
+    const U32 rep[ZSTD_REP_NUM],
+    U32 const ll0,
+    U32 const lengthToBeat);
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
+        ZSTD_match_t* matches,
+        ZSTD_matchState_t* ms,
+        U32* nextToUpdate3,
+        const BYTE* ip,
+        const BYTE* const iHighLimit,
+        const U32 rep[ZSTD_REP_NUM],
+        U32 const ll0,
+        U32 const lengthToBeat,
+        const ZSTD_dictMode_e dictMode,
+        const U32 mls)
+{
+    assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
+    DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
+    if (ip < ms->window.base + ms->nextToUpdate)
+        return 0;   /* skipped area */
+    ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
+    return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
+}
+
+#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls)            \
+    static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)(      \
+            ZSTD_match_t* matches,                             \
+            ZSTD_matchState_t* ms,                             \
+            U32* nextToUpdate3,                                \
+            const BYTE* ip,                                    \
+            const BYTE* const iHighLimit,                      \
+            const U32 rep[ZSTD_REP_NUM],                       \
+            U32 const ll0,                                     \
+            U32 const lengthToBeat)                            \
+    {                                                          \
+        return ZSTD_btGetAllMatches_internal(                  \
+                matches, ms, nextToUpdate3, ip, iHighLimit,    \
+                rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
+    }
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode)  \
+    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3)  \
+    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4)  \
+    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5)  \
+    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
+
+GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
+
+#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) {  \
+    ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3),       \
+    ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4),       \
+    ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5),       \
+    ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6)        \
+}
 
-FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
-                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
-                        ZSTD_matchState_t* ms,
-                        U32* nextToUpdate3,
-                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
-                        const U32 rep[ZSTD_REP_NUM],
-                        U32 const ll0,
-                        U32 const lengthToBeat)
+static ZSTD_getAllMatchesFn
+ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
 {
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32 const matchLengthSearch = cParams->minMatch;
-    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
-    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
-    switch(matchLengthSearch)
-    {
-    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
-    default :
-    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
-    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
-    case 7 :
-    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
+    ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
+    };
+    U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
+    assert((U32)dictMode < 3);
+    assert(mls - 3 < 4);
+    return getAllMatchesFns[(int)dictMode][mls - 3];
+}
+
+/*************************
+*  LDM helper functions  *
+*************************/
+
+/* Struct containing info needed to make decision about ldm inclusion */
+typedef struct {
+    rawSeqStore_t seqStore;   /* External match candidates store for this block */
+    U32 startPosInBlock;      /* Start position of the current match candidate */
+    U32 endPosInBlock;        /* End position of the current match candidate */
+    U32 offset;               /* Offset of the match candidate */
+} ZSTD_optLdm_t;
+
+/* ZSTD_optLdm_skipRawSeqStoreBytes():
+ * Moves forward in @rawSeqStore by @nbBytes,
+ * which will update the fields 'pos' and 'posInSequence'.
+ */
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+{
+    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+        if (currPos >= currSeq.litLength + currSeq.matchLength) {
+            currPos -= currSeq.litLength + currSeq.matchLength;
+            rawSeqStore->pos++;
+        } else {
+            rawSeqStore->posInSequence = currPos;
+            break;
+        }
+    }
+    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+        rawSeqStore->posInSequence = 0;
+    }
+}
+
+/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
+ * Calculates the beginning and end of the next match in the current block.
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
+ */
+static void
+ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+                                       U32 blockBytesRemaining)
+{
+    rawSeq currSeq;
+    U32 currBlockEndPos;
+    U32 literalsBytesRemaining;
+    U32 matchBytesRemaining;
+
+    /* Setting match end position to MAX to ensure we never use an LDM during this block */
+    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+        optLdm->startPosInBlock = UINT_MAX;
+        optLdm->endPosInBlock = UINT_MAX;
+        return;
+    }
+    /* Calculate appropriate bytes left in matchLength and litLength
+     * after adjusting based on ldmSeqStore->posInSequence */
+    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
+    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
+    currBlockEndPos = currPosInBlock + blockBytesRemaining;
+    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
+            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
+            0;
+    matchBytesRemaining = (literalsBytesRemaining == 0) ?
+            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
+            currSeq.matchLength;
+
+    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
+    if (literalsBytesRemaining >= blockBytesRemaining) {
+        optLdm->startPosInBlock = UINT_MAX;
+        optLdm->endPosInBlock = UINT_MAX;
+        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
+        return;
+    }
+
+    /* Matches may be < MINMATCH by this process. In that case, we will reject them
+       when we are deciding whether or not to add the ldm */
+    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
+    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
+    optLdm->offset = currSeq.offset;
+
+    if (optLdm->endPosInBlock > currBlockEndPos) {
+        /* Match ends after the block ends, we can't use the whole match */
+        optLdm->endPosInBlock = currBlockEndPos;
+        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
+    } else {
+        /* Consume nb of bytes equal to size of sequence left */
+        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
     }
 }
 
+/* ZSTD_optLdm_maybeAddMatch():
+ * Adds a match if it's long enough,
+ * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock',
+ * into 'matches'. Maintains the correct ordering of 'matches'.
+ */
+static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+                                      const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+{
+    U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
+    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+    U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
+
+    /* Ensure that current block position is not outside of the match */
+    if (currPosInBlock < optLdm->startPosInBlock
+      || currPosInBlock >= optLdm->endPosInBlock
+      || candidateMatchLength < MINMATCH) {
+        return;
+    }
 
-/*-*******************************
-*  Optimal parser
-*********************************/
-typedef struct repcodes_s {
-    U32 rep[3];
-} repcodes_t;
+    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+        U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
+        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
+                 candidateOffCode, candidateMatchLength, currPosInBlock);
+        matches[*nbMatches].len = candidateMatchLength;
+        matches[*nbMatches].off = candidateOffCode;
+        (*nbMatches)++;
+    }
+}
 
-static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+/* ZSTD_optLdm_processMatchCandidate():
+ * Wrapper function to update ldm seq store and call ldm functions as necessary.
+ */
+static void
+ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+                                  ZSTD_match_t* matches, U32* nbMatches,
+                                  U32 currPosInBlock, U32 remainingBytes)
 {
-    repcodes_t newReps;
-    if (offset >= ZSTD_REP_NUM) {  /* full offset */
-        newReps.rep[2] = rep[1];
-        newReps.rep[1] = rep[0];
-        newReps.rep[0] = offset - ZSTD_REP_MOVE;
-    } else {   /* repcode */
-        U32 const repCode = offset + ll0;
-        if (repCode > 0) {  /* note : if repCode==0, no change */
-            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-            newReps.rep[1] = rep[0];
-            newReps.rep[0] = currentOffset;
-        } else {   /* repCode == 0 */
-            memcpy(&newReps, rep, sizeof(newReps));
+    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+        return;
+    }
+
+    if (currPosInBlock >= optLdm->endPosInBlock) {
+        if (currPosInBlock > optLdm->endPosInBlock) {
+            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
+             * at the end of a match from the ldm seq store, and will often be some bytes
+             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
+             */
+            U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
         }
+        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
     }
-    return newReps;
+    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
 }
 
 
+/*-*******************************
+*  Optimal parser
+*********************************/
+
 static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
 {
     return sol.litlen + sol.mlen;
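The large hunk above replaces the old switch(matchLengthSearch) with macro-generated specializations selected through a [dictMode][mls] table of function pointers, so each variant is compiled with its parameters as constants. The same pattern in miniature, with toy functions standing in for the match finders (everything here is illustrative):

#include <stdio.h>

typedef unsigned (*fn_t)(unsigned x);

#define GEN_FN(mode, mls) \
    static unsigned fn_##mode##_##mls(unsigned x) { return x * (mls) + (mode); }

#define GEN_ALL(mode) GEN_FN(mode, 3) GEN_FN(mode, 4) GEN_FN(mode, 5) GEN_FN(mode, 6)
GEN_ALL(0) GEN_ALL(1) GEN_ALL(2)   /* noDict, extDict, dictMatchState */

#define FN_ROW(mode) { fn_##mode##_3, fn_##mode##_4, fn_##mode##_5, fn_##mode##_6 }

static fn_t selectFn(int dictMode, unsigned mls)
{
    static const fn_t table[3][4] = { FN_ROW(0), FN_ROW(1), FN_ROW(2) };
    return table[dictMode][mls - 3];   /* mls is bounded to [3,6] by the caller */
}

int main(void)
{
    printf("%u\n", selectFn(1, 4)(10));   /* dispatches to fn_1_4: 10*4 + 1 = 41 */
    return 0;
}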
@@ -839,7 +1025,7 @@ listStats(const U32* table, int lastEltID)
     int enb;
     for (enb=0; enb < nbElts; enb++) {
         (void)table;
-        //RAWLOG(2, "%3i:%3i,  ", enb, table[enb]);
+        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
         RAWLOG(2, "%4i,", table[enb]);
     }
     RAWLOG(2, " \n");
@@ -865,6 +1051,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
     const BYTE* const prefixStart = base + ms->window.dictLimit;
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
 
+    ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
+
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
     U32 nextToUpdate3 = ms->nextToUpdate;
@@ -872,6 +1060,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
     ZSTD_optimal_t* const opt = optStatePtr->priceTable;
     ZSTD_match_t* const matches = optStatePtr->matchTable;
     ZSTD_optimal_t lastSequence;
+    ZSTD_optLdm_t optLdm;
+
+    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
+    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
+    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
 
     /* init */
     DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
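optLdm, initialized above from ms->ldmSeqStore, carries the block's long-distance-match candidates; ZSTD_optLdm_skipRawSeqStoreBytes() is the cursor that keeps the store aligned with the parser position as candidates are consumed. A standalone sketch of that cursor logic with simplified types (rawSeqStore_t reduced to the fields the loop touches):

#include <stdio.h>
#include <stddef.h>

typedef struct { unsigned litLength, matchLength; } seq_t;
typedef struct { const seq_t* seq; size_t pos, posInSequence, size; } seqStore_t;

static void skipBytes(seqStore_t* s, size_t nbBytes)
{
    unsigned currPos = (unsigned)(s->posInSequence + nbBytes);
    while (currPos && s->pos < s->size) {
        seq_t const c = s->seq[s->pos];
        if (currPos >= c.litLength + c.matchLength) {
            currPos -= c.litLength + c.matchLength;  /* consume whole sequence */
            s->pos++;
        } else {
            s->posInSequence = currPos;              /* stop mid-sequence */
            break;
        }
    }
    if (currPos == 0 || s->pos == s->size) s->posInSequence = 0;
}

int main(void)
{
    seq_t const seqs[2] = { {10, 30}, {5, 100} };    /* 40 and 105 bytes */
    seqStore_t s = { seqs, 0, 0, 2 };
    skipBytes(&s, 55);                               /* 40 whole + 15 into seq 1 */
    printf("pos=%zu posInSequence=%zu\n", s.pos, s.posInSequence);   /* 1, 15 */
    return 0;
}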
@@ -887,25 +1080,32 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
         /* find first match */
         {   U32 const litlen = (U32)(ip - anchor);
             U32 const ll0 = !litlen;
-            U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+            U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
+            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+                                              (U32)(ip-istart), (U32)(iend - ip));
             if (!nbMatches) { ip++; continue; }
 
             /* initialize opt[0] */
             { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
             opt[0].mlen = 0;  /* means is_a_literal */
             opt[0].litlen = litlen;
-            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
+            /* We don't need to include the actual price of the literals because
+             * it is static for the duration of the forward pass, and is included
+             * in every price. We include the literal length to avoid negative
+             * prices when we subtract the previous literal length.
+             */
+            opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
 
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
-                U32 const maxOffset = matches[nbMatches-1].off;
+                U32 const maxOffcode = matches[nbMatches-1].off;
                 DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
-                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
+                            nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
                     lastSequence.litlen = litlen;
                     lastSequence.mlen = maxML;
-                    lastSequence.off = maxOffset;
+                    lastSequence.off = maxOffcode;
                     DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                                 maxML, sufficient_len);
                     cur = 0;
@@ -914,27 +1114,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             }   }
 
             /* set prices for first matches starting position == 0 */
-            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+            assert(opt[0].price >= 0);
+            {   U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                 U32 pos;
                 U32 matchNb;
                 for (pos = 1; pos < minMatch; pos++) {
                     opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                 }
                 for (matchNb = 0; matchNb < nbMatches; matchNb++) {
-                    U32 const offset = matches[matchNb].off;
+                    U32 const offcode = matches[matchNb].off;
                     U32 const end = matches[matchNb].len;
-                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
                     for ( ; pos <= end ; pos++ ) {
-                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+                        U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
                         U32 const sequencePrice = literalsPrice + matchPrice;
                         DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                     pos, ZSTD_fCost(sequencePrice));
                         opt[pos].mlen = pos;
-                        opt[pos].off = offset;
+                        opt[pos].off = offcode;
                         opt[pos].litlen = litlen;
-                        opt[pos].price = sequencePrice;
-                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
-                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
+                        opt[pos].price = (int)sequencePrice;
                 }   }
                 last_pos = pos-1;
             }
@@ -949,9 +1147,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             /* Fix current position with one literal if cheaper */
             {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                 int const price = opt[cur-1].price
-                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
-                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
-                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+                                + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+                                + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+                                - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                 assert(price < 1000000000); /* overflow check */
                 if (price <= opt[cur].price) {
                     DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
@@ -961,7 +1159,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                     opt[cur].off = 0;
                     opt[cur].litlen = litlen;
                     opt[cur].price = price;
-                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
                 } else {
                     DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
                                 inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
@@ -969,6 +1166,21 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                 }
             }
 
+            /* Set the repcodes of the current position. We must do it here
+             * because we rely on the repcodes of the 2nd to last sequence being
+             * correct to set the next chunks repcodes during the backward
+             * traversal.
+             */
+            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+            assert(cur >= opt[cur].mlen);
+            if (opt[cur].mlen != 0) {
+                U32 const prev = cur - opt[cur].mlen;
+                repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
+                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+            } else {
+                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+            }
+
             /* last match must start at a minimum distance of 8 from oend */
             if (inr > ilimit) continue;
 
@@ -980,12 +1192,17 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
  }

+ assert(opt[cur].price >= 0);
  { U32 const ll0 = (opt[cur].mlen != 0);
  U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
- U32 const previousPrice = opt[cur].price;
+ U32 const previousPrice = (U32)opt[cur].price;
  U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
  U32 matchNb;
+
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(inr-istart), (U32)(iend-inr));
+
  if (!nbMatches) {
  DEBUGLOG(7, "rPos:%u : no match found", cur);
  continue;
@@ -1009,7 +1226,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  /* set prices using matches found at position == cur */
  for (matchNb = 0; matchNb < nbMatches; matchNb++) {
  U32 const offset = matches[matchNb].off;
- repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
  U32 const lastML = matches[matchNb].len;
  U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
  U32 mlen;
@@ -1019,7 +1235,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,

  for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
  U32 const pos = cur + mlen;
- int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+ int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

  if ((pos > last_pos) || (price < opt[pos].price)) {
  DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
@@ -1029,8 +1245,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  opt[pos].off = offset;
  opt[pos].litlen = litlen;
  opt[pos].price = price;
- ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
- memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
  } else {
  DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
  pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
@@ -1046,6 +1260,17 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
  assert(opt[0].mlen == 0);

+ /* Set the next chunk's repcodes based on the repcodes of the beginning
+ * of the last match, and the last sequence. This avoids us having to
+ * update them while traversing the sequences.
+ */
+ if (lastSequence.mlen != 0) {
+ repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(reps));
+ } else {
+ ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ }
+
  { U32 const storeEnd = cur + 1;
  U32 storeStart = storeEnd;
  U32 seqPos = cur;
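Both repcode blocks added above (per position during the forward scan, and once for the next chunk at _shortestPath) lean on ZSTD_newRep, which returns an updated history rather than mutating rep[] in place. Below is a self-contained sketch of that rotation, mirroring the in-place variant this diff deletes just after ("like ZSTD_updateRep(), but update in place"); the names Reps, new_reps, REP_NUM, and REP_MOVE are hypothetical stand-ins for the zstd types and constants:

    #define REP_NUM  3              /* stands in for ZSTD_REP_NUM */
    #define REP_MOVE (REP_NUM - 1)  /* stands in for ZSTD_REP_MOVE */

    typedef struct { unsigned rep[REP_NUM]; } Reps;

    static Reps new_reps(Reps hist, unsigned offCode, int litlenIsZero)
    {
        Reps r = hist;
        if (offCode >= REP_NUM) {           /* full offset: push history down */
            r.rep[2] = r.rep[1];
            r.rep[1] = r.rep[0];
            r.rep[0] = offCode - REP_MOVE;
        } else {                            /* repcode: rotate the hit entry up */
            unsigned const repCode = offCode + (unsigned)litlenIsZero;
            if (repCode) {                  /* repCode==0 : no change */
                unsigned const cur = (repCode == REP_NUM) ? (r.rep[0] - 1)
                                                          : r.rep[repCode];
                if (repCode >= 2) r.rep[2] = r.rep[1];
                r.rep[1] = r.rep[0];
                r.rep[0] = cur;
            }
        }
        return r;
    }

A position reached by a match of length mlen derives its history from opt[cur - mlen]; a position reached by a literal simply copies opt[cur - 1], which is exactly the two branches the added blocks take.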
@@ -1082,67 +1307,44 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
  continue; /* will finish */
  }

- /* repcodes update : like ZSTD_updateRep(), but update in place */
- if (offCode >= ZSTD_REP_NUM) { /* full offset */
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offCode - ZSTD_REP_MOVE;
- } else { /* repcode */
- U32 const repCode = offCode + (llen==0);
- if (repCode) { /* note : if repCode==0, no change */
- U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
- if (repCode >= 2) rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = currentOffset;
- } }
-
  assert(anchor + llen <= iend);
  ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
- ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
  anchor += advance;
  ip = anchor;
  } }
  ZSTD_setBasePrices(optStatePtr, optLevel);
  }
-
  } /* while (ip < ilimit) */

  /* Return the last literals size */
  return (size_t)(iend - anchor);
  }

+ static size_t ZSTD_compressBlock_opt0(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+ {
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+ }
+
+ static size_t ZSTD_compressBlock_opt2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+ {
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+ }

  size_t ZSTD_compressBlock_btopt(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
  DEBUGLOG(5, "ZSTD_compressBlock_btopt");
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
  }
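The new ZSTD_compressBlock_opt0/ZSTD_compressBlock_opt2 wrappers pin optLevel to a literal constant, so the compiler can specialize ZSTD_compressBlock_opt_generic once per level and prune the other level's branches, instead of testing optLevel at run time. A minimal sketch of the same pattern, with hypothetical names:

    #include <stddef.h>

    /* generic worker: 'level' only ever arrives as a literal constant,
     * so each wrapper below can be inlined and fully specialized */
    static size_t work_generic(const unsigned char* src, size_t n, int level)
    {
        size_t cost = 0;
        size_t i;
        for (i = 0; i < n; i++)
            cost += (level >= 2) ? (size_t)(src[i] & 3) : 1;  /* level-dependent work */
        return cost;
    }

    static size_t work_fast(const unsigned char* src, size_t n)
    {   return work_generic(src, n, 0 /* level */);   }

    static size_t work_strong(const unsigned char* src, size_t n)
    {   return work_generic(src, n, 2 /* level */);   }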


- /* used in 2-pass strategy */
- static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
- {
- U32 s, sum=0;
- assert(ZSTD_FREQ_DIV+bonus >= 0);
- for (s=0; s<lastEltIndex+1; s++) {
- table[s] <<= ZSTD_FREQ_DIV+bonus;
- table[s]--;
- sum += table[s];
- }
- return sum;
- }

- /* used in 2-pass strategy */
- MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
- {
- if (ZSTD_compressedLiterals(optPtr))
- optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
- optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
- optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
- optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
- }
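For what this deletion drops: ZSTD_upscaleStat multiplied every first-pass frequency by 2^(ZSTD_FREQ_DIV+bonus) and subtracted one, so the seeded statistics outweigh fresh per-sequence updates during the second pass. A standalone restatement (hypothetical names; ZSTD_FREQ_DIV's value is not shown in this diff, and entries are assumed to be at least 1 so the subtraction cannot wrap):

    /* scale each frequency by 2^shift, then subtract 1; returns the new sum */
    static unsigned upscale_stat(unsigned* table, unsigned lastEltIndex, unsigned shift)
    {
        unsigned s, sum = 0;
        for (s = 0; s <= lastEltIndex; s++) {
            table[s] = (table[s] << shift) - 1;
            sum += table[s];
        }
        return sum;
    }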

  /* ZSTD_initStats_ultra():
  * make a first compression pass, just to seed stats with more accurate starting values.
@@ -1156,7 +1358,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
  const void* src, size_t srcSize)
  {
  U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
- memcpy(tmpRep, rep, sizeof(tmpRep));
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

  DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
  assert(ms->opt.litLengthSum == 0); /* first block */
@@ -1164,7 +1366,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
  assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
  assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */

- ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/
+ ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/

  /* invalidate first scan from history */
  ZSTD_resetSeqStore(seqStore);
@@ -1173,8 +1375,6 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
  ms->window.lowLimit = ms->window.dictLimit;
  ms->nextToUpdate = ms->window.dictLimit;

- /* re-inforce weight of collected statistics */
- ZSTD_upscaleStats(&ms->opt);
  }

  size_t ZSTD_compressBlock_btultra(
@@ -1182,14 +1382,14 @@ size_t ZSTD_compressBlock_btultra(
  const void* src, size_t srcSize)
  {
  DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
  }

  size_t ZSTD_compressBlock_btultra2(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
- U32 const current = (U32)((const BYTE*)src - ms->window.base);
+ U32 const curr = (U32)((const BYTE*)src - ms->window.base);
  DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

  /* 2-pass strategy:
@@ -1204,41 +1404,41 @@ size_t ZSTD_compressBlock_btultra2(
  if ( (ms->opt.litLengthSum==0) /* first block */
  && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
  && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
- && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
  && (srcSize > ZSTD_PREDEF_THRESHOLD)
  ) {
  ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
  }

- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
  }
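The guard above enables the stat-seeding pass only when it is safe (fresh state, no LDM output, no dictionary, block starts the frame) and worthwhile (block larger than ZSTD_PREDEF_THRESHOLD, so compressing it twice amortizes). Restated as a standalone predicate, with hypothetical names:

    #include <stddef.h>

    static int should_seed_stats(unsigned litLengthSum, int haveLdmSeqs,
                                 int haveDict, int atFrameStart,
                                 size_t srcSize, size_t predefThreshold)
    {
        return (litLengthSum == 0)      /* first block: stats still empty */
            && !haveLdmSeqs             /* no ldm sequences queued */
            && !haveDict                /* no dictionary */
            && atFrameStart             /* nothing already loaded nor skipped */
            && (srcSize > predefThreshold);
    }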

  size_t ZSTD_compressBlock_btopt_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
  }

  size_t ZSTD_compressBlock_btultra_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
  }

  size_t ZSTD_compressBlock_btopt_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
  }

  size_t ZSTD_compressBlock_btultra_extDict(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  const void* src, size_t srcSize)
  {
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
  }

  /* note : no btultra2 variant for extDict nor dictMatchState,