extzstd 0.3.2 → 0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. checksums.yaml +4 -4
  2. data/README.md +4 -3
  3. data/contrib/zstd/CHANGELOG +225 -1
  4. data/contrib/zstd/CONTRIBUTING.md +158 -75
  5. data/contrib/zstd/LICENSE +4 -4
  6. data/contrib/zstd/Makefile +106 -69
  7. data/contrib/zstd/Package.swift +36 -0
  8. data/contrib/zstd/README.md +64 -36
  9. data/contrib/zstd/SECURITY.md +15 -0
  10. data/contrib/zstd/TESTING.md +2 -3
  11. data/contrib/zstd/lib/BUCK +5 -7
  12. data/contrib/zstd/lib/Makefile +117 -199
  13. data/contrib/zstd/lib/README.md +37 -7
  14. data/contrib/zstd/lib/common/allocations.h +55 -0
  15. data/contrib/zstd/lib/common/bits.h +200 -0
  16. data/contrib/zstd/lib/common/bitstream.h +80 -86
  17. data/contrib/zstd/lib/common/compiler.h +225 -63
  18. data/contrib/zstd/lib/common/cpu.h +37 -1
  19. data/contrib/zstd/lib/common/debug.c +7 -1
  20. data/contrib/zstd/lib/common/debug.h +21 -12
  21. data/contrib/zstd/lib/common/entropy_common.c +15 -37
  22. data/contrib/zstd/lib/common/error_private.c +9 -2
  23. data/contrib/zstd/lib/common/error_private.h +93 -5
  24. data/contrib/zstd/lib/common/fse.h +12 -87
  25. data/contrib/zstd/lib/common/fse_decompress.c +37 -117
  26. data/contrib/zstd/lib/common/huf.h +97 -172
  27. data/contrib/zstd/lib/common/mem.h +58 -58
  28. data/contrib/zstd/lib/common/pool.c +38 -17
  29. data/contrib/zstd/lib/common/pool.h +10 -4
  30. data/contrib/zstd/lib/common/portability_macros.h +158 -0
  31. data/contrib/zstd/lib/common/threading.c +74 -14
  32. data/contrib/zstd/lib/common/threading.h +5 -10
  33. data/contrib/zstd/lib/common/xxhash.c +6 -814
  34. data/contrib/zstd/lib/common/xxhash.h +6930 -195
  35. data/contrib/zstd/lib/common/zstd_common.c +1 -36
  36. data/contrib/zstd/lib/common/zstd_deps.h +1 -1
  37. data/contrib/zstd/lib/common/zstd_internal.h +68 -154
  38. data/contrib/zstd/lib/common/zstd_trace.h +163 -0
  39. data/contrib/zstd/lib/compress/clevels.h +134 -0
  40. data/contrib/zstd/lib/compress/fse_compress.c +75 -155
  41. data/contrib/zstd/lib/compress/hist.c +1 -1
  42. data/contrib/zstd/lib/compress/hist.h +1 -1
  43. data/contrib/zstd/lib/compress/huf_compress.c +810 -259
  44. data/contrib/zstd/lib/compress/zstd_compress.c +2864 -919
  45. data/contrib/zstd/lib/compress/zstd_compress_internal.h +523 -192
  46. data/contrib/zstd/lib/compress/zstd_compress_literals.c +117 -40
  47. data/contrib/zstd/lib/compress/zstd_compress_literals.h +16 -6
  48. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +28 -19
  49. data/contrib/zstd/lib/compress/zstd_compress_sequences.h +1 -1
  50. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +251 -412
  51. data/contrib/zstd/lib/compress/zstd_compress_superblock.h +1 -1
  52. data/contrib/zstd/lib/compress/zstd_cwksp.h +284 -97
  53. data/contrib/zstd/lib/compress/zstd_double_fast.c +382 -133
  54. data/contrib/zstd/lib/compress/zstd_double_fast.h +14 -2
  55. data/contrib/zstd/lib/compress/zstd_fast.c +732 -260
  56. data/contrib/zstd/lib/compress/zstd_fast.h +3 -2
  57. data/contrib/zstd/lib/compress/zstd_lazy.c +1177 -390
  58. data/contrib/zstd/lib/compress/zstd_lazy.h +129 -14
  59. data/contrib/zstd/lib/compress/zstd_ldm.c +280 -210
  60. data/contrib/zstd/lib/compress/zstd_ldm.h +3 -2
  61. data/contrib/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
  62. data/contrib/zstd/lib/compress/zstd_opt.c +516 -285
  63. data/contrib/zstd/lib/compress/zstd_opt.h +32 -8
  64. data/contrib/zstd/lib/compress/zstdmt_compress.c +202 -131
  65. data/contrib/zstd/lib/compress/zstdmt_compress.h +9 -6
  66. data/contrib/zstd/lib/decompress/huf_decompress.c +1149 -555
  67. data/contrib/zstd/lib/decompress/huf_decompress_amd64.S +595 -0
  68. data/contrib/zstd/lib/decompress/zstd_ddict.c +4 -4
  69. data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
  70. data/contrib/zstd/lib/decompress/zstd_decompress.c +583 -106
  71. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +1054 -379
  72. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +14 -3
  73. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +56 -6
  74. data/contrib/zstd/lib/deprecated/zbuff.h +1 -1
  75. data/contrib/zstd/lib/deprecated/zbuff_common.c +1 -1
  76. data/contrib/zstd/lib/deprecated/zbuff_compress.c +24 -4
  77. data/contrib/zstd/lib/deprecated/zbuff_decompress.c +3 -1
  78. data/contrib/zstd/lib/dictBuilder/cover.c +60 -44
  79. data/contrib/zstd/lib/dictBuilder/cover.h +6 -11
  80. data/contrib/zstd/lib/dictBuilder/divsufsort.c +1 -1
  81. data/contrib/zstd/lib/dictBuilder/fastcover.c +26 -18
  82. data/contrib/zstd/lib/dictBuilder/zdict.c +100 -101
  83. data/contrib/zstd/lib/legacy/zstd_legacy.h +38 -1
  84. data/contrib/zstd/lib/legacy/zstd_v01.c +18 -53
  85. data/contrib/zstd/lib/legacy/zstd_v01.h +1 -1
  86. data/contrib/zstd/lib/legacy/zstd_v02.c +28 -85
  87. data/contrib/zstd/lib/legacy/zstd_v02.h +1 -1
  88. data/contrib/zstd/lib/legacy/zstd_v03.c +29 -88
  89. data/contrib/zstd/lib/legacy/zstd_v03.h +1 -1
  90. data/contrib/zstd/lib/legacy/zstd_v04.c +27 -80
  91. data/contrib/zstd/lib/legacy/zstd_v04.h +1 -1
  92. data/contrib/zstd/lib/legacy/zstd_v05.c +36 -85
  93. data/contrib/zstd/lib/legacy/zstd_v05.h +1 -1
  94. data/contrib/zstd/lib/legacy/zstd_v06.c +44 -96
  95. data/contrib/zstd/lib/legacy/zstd_v06.h +1 -1
  96. data/contrib/zstd/lib/legacy/zstd_v07.c +37 -92
  97. data/contrib/zstd/lib/legacy/zstd_v07.h +1 -1
  98. data/contrib/zstd/lib/libzstd.mk +237 -0
  99. data/contrib/zstd/lib/libzstd.pc.in +4 -3
  100. data/contrib/zstd/lib/module.modulemap +35 -0
  101. data/contrib/zstd/lib/{dictBuilder/zdict.h → zdict.h} +202 -33
  102. data/contrib/zstd/lib/zstd.h +1030 -332
  103. data/contrib/zstd/lib/{common/zstd_errors.h → zstd_errors.h} +27 -8
  104. data/ext/extconf.rb +26 -7
  105. data/ext/extzstd.c +51 -24
  106. data/ext/extzstd.h +33 -6
  107. data/ext/extzstd_stream.c +74 -31
  108. data/ext/libzstd_conf.h +0 -1
  109. data/ext/zstd_decompress_asm.S +1 -0
  110. metadata +17 -7
  111. data/contrib/zstd/appveyor.yml +0 -292
  112. data/ext/depend +0 -2
@@ -1,5 +1,5 @@
1
1
  /*
2
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
3
  * All rights reserved.
4
4
  *
5
5
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,46 @@
11
11
  #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
12
12
  #include "zstd_fast.h"
13
13
 
14
+ static
15
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
16
+ void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
17
+ const void* const end,
18
+ ZSTD_dictTableLoadMethod_e dtlm)
19
+ {
20
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
21
+ U32* const hashTable = ms->hashTable;
22
+ U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
23
+ U32 const mls = cParams->minMatch;
24
+ const BYTE* const base = ms->window.base;
25
+ const BYTE* ip = base + ms->nextToUpdate;
26
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
27
+ const U32 fastHashFillStep = 3;
14
28
 
15
- void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
29
+ /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
30
+ * Feel free to remove this assert if there's a good reason! */
31
+ assert(dtlm == ZSTD_dtlm_full);
32
+
33
+ /* Always insert every fastHashFillStep position into the hash table.
34
+ * Insert the other positions if their hash entry is empty.
35
+ */
36
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
37
+ U32 const curr = (U32)(ip - base);
38
+ { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
39
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
40
+
41
+ if (dtlm == ZSTD_dtlm_fast) continue;
42
+ /* Only load extra positions for ZSTD_dtlm_full */
43
+ { U32 p;
44
+ for (p = 1; p < fastHashFillStep; ++p) {
45
+ size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
46
+ if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
47
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
48
+ } } } }
49
+ }
50
+
51
+ static
52
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
53
+ void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
16
54
  const void* const end,
17
55
  ZSTD_dictTableLoadMethod_e dtlm)
18
56
  {
@@ -25,6 +63,10 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
25
63
  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
26
64
  const U32 fastHashFillStep = 3;
27
65
 
66
+ /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
67
+ * Feel free to remove this assert if there's a good reason! */
68
+ assert(dtlm == ZSTD_dtlm_fast);
69
+
28
70
  /* Always insert every fastHashFillStep position into the hash table.
29
71
  * Insert the other positions if their hash entry is empty.
30
72
  */
@@ -42,146 +84,345 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
42
84
  } } } }
43
85
  }
44
86
 
87
+ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
88
+ const void* const end,
89
+ ZSTD_dictTableLoadMethod_e dtlm,
90
+ ZSTD_tableFillPurpose_e tfp)
91
+ {
92
+ if (tfp == ZSTD_tfp_forCDict) {
93
+ ZSTD_fillHashTableForCDict(ms, end, dtlm);
94
+ } else {
95
+ ZSTD_fillHashTableForCCtx(ms, end, dtlm);
96
+ }
97
+ }
98
+
45
99
 
46
- FORCE_INLINE_TEMPLATE size_t
47
- ZSTD_compressBlock_fast_generic(
100
+ /**
101
+ * If you squint hard enough (and ignore repcodes), the search operation at any
102
+ * given position is broken into 4 stages:
103
+ *
104
+ * 1. Hash (map position to hash value via input read)
105
+ * 2. Lookup (map hash val to index via hashtable read)
106
+ * 3. Load (map index to value at that position via input read)
107
+ * 4. Compare
108
+ *
109
+ * Each of these steps involves a memory read at an address which is computed
110
+ * from the previous step. This means these steps must be sequenced and their
111
+ * latencies are cumulative.
112
+ *
113
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
114
+ * onto the next, this implementation interleaves these operations across the
115
+ * next few positions:
116
+ *
117
+ * R = Repcode Read & Compare
118
+ * H = Hash
119
+ * T = Table Lookup
120
+ * M = Match Read & Compare
121
+ *
122
+ * Pos | Time -->
123
+ * ----+-------------------
124
+ * N | ... M
125
+ * N+1 | ... TM
126
+ * N+2 | R H T M
127
+ * N+3 | H TM
128
+ * N+4 | R H T M
129
+ * N+5 | H ...
130
+ * N+6 | R ...
131
+ *
132
+ * This is very much analogous to the pipelining of execution in a CPU. And just
133
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
134
+ * branch).
135
+ *
136
+ * When this happens, we throw away our current state, and do the following prep
137
+ * to re-enter the loop:
138
+ *
139
+ * Pos | Time -->
140
+ * ----+-------------------
141
+ * N | H T
142
+ * N+1 | H
143
+ *
144
+ * This is also the work we do at the beginning to enter the loop initially.
145
+ */
146
+ FORCE_INLINE_TEMPLATE
147
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
148
+ size_t ZSTD_compressBlock_fast_noDict_generic(
48
149
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
49
150
  void const* src, size_t srcSize,
50
- U32 const mls)
151
+ U32 const mls, U32 const hasStep)
51
152
  {
52
153
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
53
154
  U32* const hashTable = ms->hashTable;
54
155
  U32 const hlog = cParams->hashLog;
55
156
  /* support stepSize of 0 */
56
- size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
157
+ size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
57
158
  const BYTE* const base = ms->window.base;
58
159
  const BYTE* const istart = (const BYTE*)src;
59
- /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
60
- const BYTE* ip0 = istart;
61
- const BYTE* ip1;
62
- const BYTE* anchor = istart;
63
160
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
64
161
  const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
65
162
  const BYTE* const prefixStart = base + prefixStartIndex;
66
163
  const BYTE* const iend = istart + srcSize;
67
164
  const BYTE* const ilimit = iend - HASH_READ_SIZE;
68
- U32 offset_1=rep[0], offset_2=rep[1];
69
- U32 offsetSaved = 0;
70
165
 
71
- /* init */
166
+ const BYTE* anchor = istart;
167
+ const BYTE* ip0 = istart;
168
+ const BYTE* ip1;
169
+ const BYTE* ip2;
170
+ const BYTE* ip3;
171
+ U32 current0;
172
+
173
+ U32 rep_offset1 = rep[0];
174
+ U32 rep_offset2 = rep[1];
175
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
176
+
177
+ size_t hash0; /* hash for ip0 */
178
+ size_t hash1; /* hash for ip1 */
179
+ U32 idx; /* match idx for ip0 */
180
+ U32 mval; /* src value at match idx */
181
+
182
+ U32 offcode;
183
+ const BYTE* match0;
184
+ size_t mLength;
185
+
186
+ /* ip0 and ip1 are always adjacent. The targetLength skipping and
187
+ * uncompressibility acceleration is applied to every other position,
188
+ * matching the behavior of #1562. step therefore represents the gap
189
+ * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
190
+ size_t step;
191
+ const BYTE* nextStep;
192
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
193
+
72
194
  DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
73
195
  ip0 += (ip0 == prefixStart);
74
- ip1 = ip0 + 1;
75
196
  { U32 const curr = (U32)(ip0 - base);
76
197
  U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
77
198
  U32 const maxRep = curr - windowLow;
78
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
79
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
199
+ if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
200
+ if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
80
201
  }
81
202
 
82
- /* Main Search Loop */
83
- #ifdef __INTEL_COMPILER
84
- /* From intel 'The vector pragma indicates that the loop should be
85
- * vectorized if it is legal to do so'. Can be used together with
86
- * #pragma ivdep (but have opted to exclude that because intel
87
- * warns against using it).*/
88
- #pragma vector always
89
- #endif
90
- while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
91
- size_t mLength;
92
- BYTE const* ip2 = ip0 + 2;
93
- size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
94
- U32 const val0 = MEM_read32(ip0);
95
- size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
96
- U32 const val1 = MEM_read32(ip1);
97
- U32 const current0 = (U32)(ip0-base);
98
- U32 const current1 = (U32)(ip1-base);
99
- U32 const matchIndex0 = hashTable[h0];
100
- U32 const matchIndex1 = hashTable[h1];
101
- BYTE const* repMatch = ip2 - offset_1;
102
- const BYTE* match0 = base + matchIndex0;
103
- const BYTE* match1 = base + matchIndex1;
104
- U32 offcode;
105
-
106
- #if defined(__aarch64__)
107
- PREFETCH_L1(ip0+256);
108
- #endif
109
-
110
- hashTable[h0] = current0; /* update hash table */
111
- hashTable[h1] = current1; /* update hash table */
112
-
113
- assert(ip0 + 1 == ip1);
114
-
115
- if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
116
- mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
117
- ip0 = ip2 - mLength;
118
- match0 = repMatch - mLength;
203
+ /* start each op */
204
+ _start: /* Requires: ip0 */
205
+
206
+ step = stepSize;
207
+ nextStep = ip0 + kStepIncr;
208
+
209
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
210
+ ip1 = ip0 + 1;
211
+ ip2 = ip0 + step;
212
+ ip3 = ip2 + 1;
213
+
214
+ if (ip3 >= ilimit) {
215
+ goto _cleanup;
216
+ }
217
+
218
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
219
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
220
+
221
+ idx = hashTable[hash0];
222
+
223
+ do {
224
+ /* load repcode match for ip[2]*/
225
+ const U32 rval = MEM_read32(ip2 - rep_offset1);
226
+
227
+ /* write back hash table entry */
228
+ current0 = (U32)(ip0 - base);
229
+ hashTable[hash0] = current0;
230
+
231
+ /* check repcode at ip[2] */
232
+ if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
233
+ ip0 = ip2;
234
+ match0 = ip0 - rep_offset1;
235
+ mLength = ip0[-1] == match0[-1];
236
+ ip0 -= mLength;
237
+ match0 -= mLength;
238
+ offcode = REPCODE1_TO_OFFBASE;
119
239
  mLength += 4;
120
- offcode = 0;
240
+
241
+ /* First write next hash table entry; we've already calculated it.
242
+ * This write is known to be safe because the ip1 is before the
243
+ * repcode (ip2). */
244
+ hashTable[hash1] = (U32)(ip1 - base);
245
+
121
246
  goto _match;
122
247
  }
123
- if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
124
- /* found a regular match */
125
- goto _offset;
248
+
249
+ /* load match for ip[0] */
250
+ if (idx >= prefixStartIndex) {
251
+ mval = MEM_read32(base + idx);
252
+ } else {
253
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
126
254
  }
127
- if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
128
- /* found a regular match after one literal */
129
- ip0 = ip1;
130
- match0 = match1;
255
+
256
+ /* check match at ip[0] */
257
+ if (MEM_read32(ip0) == mval) {
258
+ /* found a match! */
259
+
260
+ /* First write next hash table entry; we've already calculated it.
261
+ * This write is known to be safe because the ip1 == ip0 + 1, so
262
+ * we know we will resume searching after ip1 */
263
+ hashTable[hash1] = (U32)(ip1 - base);
264
+
131
265
  goto _offset;
132
266
  }
133
- { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
134
- assert(step >= 2);
135
- ip0 += step;
136
- ip1 += step;
137
- continue;
267
+
268
+ /* lookup ip[1] */
269
+ idx = hashTable[hash1];
270
+
271
+ /* hash ip[2] */
272
+ hash0 = hash1;
273
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
274
+
275
+ /* advance to next positions */
276
+ ip0 = ip1;
277
+ ip1 = ip2;
278
+ ip2 = ip3;
279
+
280
+ /* write back hash table entry */
281
+ current0 = (U32)(ip0 - base);
282
+ hashTable[hash0] = current0;
283
+
284
+ /* load match for ip[0] */
285
+ if (idx >= prefixStartIndex) {
286
+ mval = MEM_read32(base + idx);
287
+ } else {
288
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
138
289
  }
139
- _offset: /* Requires: ip0, match0 */
140
- /* Compute the offset code */
141
- offset_2 = offset_1;
142
- offset_1 = (U32)(ip0-match0);
143
- offcode = offset_1 + ZSTD_REP_MOVE;
144
- mLength = 4;
145
- /* Count the backwards match length */
146
- while (((ip0>anchor) & (match0>prefixStart))
147
- && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
148
290
 
149
- _match: /* Requires: ip0, match0, offcode */
150
- /* Count the forward length */
151
- mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
152
- ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
153
- /* match found */
154
- ip0 += mLength;
155
- anchor = ip0;
291
+ /* check match at ip[0] */
292
+ if (MEM_read32(ip0) == mval) {
293
+ /* found a match! */
294
+
295
+ /* first write next hash table entry; we've already calculated it */
296
+ if (step <= 4) {
297
+ /* We need to avoid writing an index into the hash table >= the
298
+ * position at which we will pick up our searching after we've
299
+ * taken this match.
300
+ *
301
+ * The minimum possible match has length 4, so the earliest ip0
302
+ * can be after we take this match will be the current ip0 + 4.
303
+ * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
304
+ * write this position.
305
+ */
306
+ hashTable[hash1] = (U32)(ip1 - base);
307
+ }
156
308
 
157
- if (ip0 <= ilimit) {
158
- /* Fill Table */
159
- assert(base+current0+2 > istart); /* check base overflow */
160
- hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
161
- hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
309
+ goto _offset;
310
+ }
162
311
 
163
- if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
164
- while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
165
- /* store sequence */
166
- size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
167
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
168
- hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
169
- ip0 += rLength;
170
- ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
171
- anchor = ip0;
172
- continue; /* faster when present (confirmed on gcc-8) ... (?) */
173
- } } }
174
- ip1 = ip0 + 1;
175
- }
312
+ /* lookup ip[1] */
313
+ idx = hashTable[hash1];
314
+
315
+ /* hash ip[2] */
316
+ hash0 = hash1;
317
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
318
+
319
+ /* advance to next positions */
320
+ ip0 = ip1;
321
+ ip1 = ip2;
322
+ ip2 = ip0 + step;
323
+ ip3 = ip1 + step;
324
+
325
+ /* calculate step */
326
+ if (ip2 >= nextStep) {
327
+ step++;
328
+ PREFETCH_L1(ip1 + 64);
329
+ PREFETCH_L1(ip1 + 128);
330
+ nextStep += kStepIncr;
331
+ }
332
+ } while (ip3 < ilimit);
333
+
334
+ _cleanup:
335
+ /* Note that there are probably still a couple positions we could search.
336
+ * However, it seems to be a meaningful performance hit to try to search
337
+ * them. So let's not. */
338
+
339
+ /* When the repcodes are outside of the prefix, we set them to zero before the loop.
340
+ * When the offsets are still zero, we need to restore them after the block to have a correct
341
+ * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
342
+ * offsets were invalid. We need to figure out which offset to refill with.
343
+ * - If both offsets are zero they are in the same order.
344
+ * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
345
+ * - If only one is zero, we need to decide which offset to restore.
346
+ * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
347
+ * - It is impossible for rep_offset2 to be non-zero.
348
+ *
349
+ * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
350
+ * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
351
+ */
352
+ offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
176
353
 
177
354
  /* save reps for next block */
178
- rep[0] = offset_1 ? offset_1 : offsetSaved;
179
- rep[1] = offset_2 ? offset_2 : offsetSaved;
355
+ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
356
+ rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
180
357
 
181
358
  /* Return the last literals size */
182
359
  return (size_t)(iend - anchor);
360
+
361
+ _offset: /* Requires: ip0, idx */
362
+
363
+ /* Compute the offset code. */
364
+ match0 = base + idx;
365
+ rep_offset2 = rep_offset1;
366
+ rep_offset1 = (U32)(ip0-match0);
367
+ offcode = OFFSET_TO_OFFBASE(rep_offset1);
368
+ mLength = 4;
369
+
370
+ /* Count the backwards match length. */
371
+ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
372
+ ip0--;
373
+ match0--;
374
+ mLength++;
375
+ }
376
+
377
+ _match: /* Requires: ip0, match0, offcode */
378
+
379
+ /* Count the forward length. */
380
+ mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
381
+
382
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
383
+
384
+ ip0 += mLength;
385
+ anchor = ip0;
386
+
387
+ /* Fill table and check for immediate repcode. */
388
+ if (ip0 <= ilimit) {
389
+ /* Fill Table */
390
+ assert(base+current0+2 > istart); /* check base overflow */
391
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
392
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
393
+
394
+ if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
395
+ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
396
+ /* store sequence */
397
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
398
+ { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
399
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
400
+ ip0 += rLength;
401
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
402
+ anchor = ip0;
403
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
404
+ } } }
405
+
406
+ goto _start;
183
407
  }
184
408
 
409
+ #define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
410
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
411
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
412
+ void const* src, size_t srcSize) \
413
+ { \
414
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
415
+ }
416
+
417
+ ZSTD_GEN_FAST_FN(noDict, 4, 1)
418
+ ZSTD_GEN_FAST_FN(noDict, 5, 1)
419
+ ZSTD_GEN_FAST_FN(noDict, 6, 1)
420
+ ZSTD_GEN_FAST_FN(noDict, 7, 1)
421
+
422
+ ZSTD_GEN_FAST_FN(noDict, 4, 0)
423
+ ZSTD_GEN_FAST_FN(noDict, 5, 0)
424
+ ZSTD_GEN_FAST_FN(noDict, 6, 0)
425
+ ZSTD_GEN_FAST_FN(noDict, 7, 0)
185
426
 
186
427
  size_t ZSTD_compressBlock_fast(
187
428
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -189,24 +430,41 @@ size_t ZSTD_compressBlock_fast(
189
430
  {
190
431
  U32 const mls = ms->cParams.minMatch;
191
432
  assert(ms->dictMatchState == NULL);
192
- switch(mls)
193
- {
194
- default: /* includes case 3 */
195
- case 4 :
196
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
197
- case 5 :
198
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
199
- case 6 :
200
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
201
- case 7 :
202
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
433
+ if (ms->cParams.targetLength > 1) {
434
+ switch(mls)
435
+ {
436
+ default: /* includes case 3 */
437
+ case 4 :
438
+ return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
439
+ case 5 :
440
+ return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
441
+ case 6 :
442
+ return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
443
+ case 7 :
444
+ return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
445
+ }
446
+ } else {
447
+ switch(mls)
448
+ {
449
+ default: /* includes case 3 */
450
+ case 4 :
451
+ return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
452
+ case 5 :
453
+ return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
454
+ case 6 :
455
+ return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
456
+ case 7 :
457
+ return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
458
+ }
459
+
203
460
  }
204
461
  }
205
462
 
206
463
  FORCE_INLINE_TEMPLATE
464
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
207
465
  size_t ZSTD_compressBlock_fast_dictMatchState_generic(
208
466
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
209
- void const* src, size_t srcSize, U32 const mls)
467
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
210
468
  {
211
469
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
212
470
  U32* const hashTable = ms->hashTable;
@@ -215,14 +473,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
215
473
  U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
216
474
  const BYTE* const base = ms->window.base;
217
475
  const BYTE* const istart = (const BYTE*)src;
218
- const BYTE* ip = istart;
476
+ const BYTE* ip0 = istart;
477
+ const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
219
478
  const BYTE* anchor = istart;
220
479
  const U32 prefixStartIndex = ms->window.dictLimit;
221
480
  const BYTE* const prefixStart = base + prefixStartIndex;
222
481
  const BYTE* const iend = istart + srcSize;
223
482
  const BYTE* const ilimit = iend - HASH_READ_SIZE;
224
483
  U32 offset_1=rep[0], offset_2=rep[1];
225
- U32 offsetSaved = 0;
226
484
 
227
485
  const ZSTD_matchState_t* const dms = ms->dictMatchState;
228
486
  const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
@@ -232,125 +490,182 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
232
490
  const BYTE* const dictStart = dictBase + dictStartIndex;
233
491
  const BYTE* const dictEnd = dms->window.nextSrc;
234
492
  const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
235
- const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
236
- const U32 dictHLog = dictCParams->hashLog;
493
+ const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
494
+ const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
237
495
 
238
496
  /* if a dictionary is still attached, it necessarily means that
239
497
  * it is within window size. So we just check it. */
240
498
  const U32 maxDistance = 1U << cParams->windowLog;
241
- const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
499
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
242
500
  assert(endIndex - prefixStartIndex <= maxDistance);
243
501
  (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
244
502
 
245
- /* ensure there will be no no underflow
503
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
504
+
505
+ /* ensure there will be no underflow
246
506
  * when translating a dict index into a local index */
247
507
  assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
248
508
 
509
+ if (ms->prefetchCDictTables) {
510
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
511
+ PREFETCH_AREA(dictHashTable, hashTableBytes);
512
+ }
513
+
249
514
  /* init */
250
515
  DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
251
- ip += (dictAndPrefixLength == 0);
516
+ ip0 += (dictAndPrefixLength == 0);
252
517
  /* dictMatchState repCode checks don't currently handle repCode == 0
253
518
  * disabling. */
254
519
  assert(offset_1 <= dictAndPrefixLength);
255
520
  assert(offset_2 <= dictAndPrefixLength);
256
521
 
257
- /* Main Search Loop */
258
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
522
+ /* Outer search loop */
523
+ assert(stepSize >= 1);
524
+ while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
259
525
  size_t mLength;
260
- size_t const h = ZSTD_hashPtr(ip, hlog, mls);
261
- U32 const curr = (U32)(ip-base);
262
- U32 const matchIndex = hashTable[h];
263
- const BYTE* match = base + matchIndex;
264
- const U32 repIndex = curr + 1 - offset_1;
265
- const BYTE* repMatch = (repIndex < prefixStartIndex) ?
266
- dictBase + (repIndex - dictIndexDelta) :
267
- base + repIndex;
268
- hashTable[h] = curr; /* update hash table */
269
-
270
- if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
271
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
272
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
273
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
274
- ip++;
275
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
276
- } else if ( (matchIndex <= prefixStartIndex) ) {
277
- size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
278
- U32 const dictMatchIndex = dictHashTable[dictHash];
279
- const BYTE* dictMatch = dictBase + dictMatchIndex;
280
- if (dictMatchIndex <= dictStartIndex ||
281
- MEM_read32(dictMatch) != MEM_read32(ip)) {
282
- assert(stepSize >= 1);
283
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
284
- continue;
285
- } else {
286
- /* found a dict match */
287
- U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
288
- mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
289
- while (((ip>anchor) & (dictMatch>dictStart))
290
- && (ip[-1] == dictMatch[-1])) {
291
- ip--; dictMatch--; mLength++;
526
+ size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
527
+
528
+ size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
529
+ U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
530
+ int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
531
+
532
+ U32 matchIndex = hashTable[hash0];
533
+ U32 curr = (U32)(ip0 - base);
534
+ size_t step = stepSize;
535
+ const size_t kStepIncr = 1 << kSearchStrength;
536
+ const BYTE* nextStep = ip0 + kStepIncr;
537
+
538
+ /* Inner search loop */
539
+ while (1) {
540
+ const BYTE* match = base + matchIndex;
541
+ const U32 repIndex = curr + 1 - offset_1;
542
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
543
+ dictBase + (repIndex - dictIndexDelta) :
544
+ base + repIndex;
545
+ const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
546
+ size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
547
+ hashTable[hash0] = curr; /* update hash table */
548
+
549
+ if (((U32) ((prefixStartIndex - 1) - repIndex) >=
550
+ 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
551
+ && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
552
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
553
+ mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
554
+ ip0++;
555
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
556
+ break;
557
+ }
558
+
559
+ if (dictTagsMatch) {
560
+ /* Found a possible dict match */
561
+ const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
562
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
563
+ if (dictMatchIndex > dictStartIndex &&
564
+ MEM_read32(dictMatch) == MEM_read32(ip0)) {
565
+ /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
566
+ if (matchIndex <= prefixStartIndex) {
567
+ U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
568
+ mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
569
+ while (((ip0 > anchor) & (dictMatch > dictStart))
570
+ && (ip0[-1] == dictMatch[-1])) {
571
+ ip0--;
572
+ dictMatch--;
573
+ mLength++;
574
+ } /* catch up */
575
+ offset_2 = offset_1;
576
+ offset_1 = offset;
577
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
578
+ break;
579
+ }
580
+ }
581
+ }
582
+
583
+ if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
584
+ /* found a regular match */
585
+ U32 const offset = (U32) (ip0 - match);
586
+ mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
587
+ while (((ip0 > anchor) & (match > prefixStart))
588
+ && (ip0[-1] == match[-1])) {
589
+ ip0--;
590
+ match--;
591
+ mLength++;
292
592
  } /* catch up */
293
593
  offset_2 = offset_1;
294
594
  offset_1 = offset;
295
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
595
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
596
+ break;
296
597
  }
297
- } else if (MEM_read32(match) != MEM_read32(ip)) {
298
- /* it's not a match, and we're not going to check the dictionary */
299
- assert(stepSize >= 1);
300
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
301
- continue;
302
- } else {
303
- /* found a regular match */
304
- U32 const offset = (U32)(ip-match);
305
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
306
- while (((ip>anchor) & (match>prefixStart))
307
- && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
308
- offset_2 = offset_1;
309
- offset_1 = offset;
310
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
311
- }
598
+
599
+ /* Prepare for next iteration */
600
+ dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
601
+ dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
602
+ matchIndex = hashTable[hash1];
603
+
604
+ if (ip1 >= nextStep) {
605
+ step++;
606
+ nextStep += kStepIncr;
607
+ }
608
+ ip0 = ip1;
609
+ ip1 = ip1 + step;
610
+ if (ip1 > ilimit) goto _cleanup;
611
+
612
+ curr = (U32)(ip0 - base);
613
+ hash0 = hash1;
614
+ } /* end inner search loop */
312
615
 
313
616
  /* match found */
314
- ip += mLength;
315
- anchor = ip;
617
+ assert(mLength);
618
+ ip0 += mLength;
619
+ anchor = ip0;
316
620
 
317
- if (ip <= ilimit) {
621
+ if (ip0 <= ilimit) {
318
622
  /* Fill Table */
319
623
  assert(base+curr+2 > istart); /* check base overflow */
320
624
  hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
321
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
625
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
322
626
 
323
627
  /* check immediate repcode */
324
- while (ip <= ilimit) {
325
- U32 const current2 = (U32)(ip-base);
628
+ while (ip0 <= ilimit) {
629
+ U32 const current2 = (U32)(ip0-base);
326
630
  U32 const repIndex2 = current2 - offset_2;
327
631
  const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
328
632
  dictBase - dictIndexDelta + repIndex2 :
329
633
  base + repIndex2;
330
634
  if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
331
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
635
+ && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
332
636
  const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
333
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
637
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
334
638
  U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
335
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
336
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
337
- ip += repLength2;
338
- anchor = ip;
639
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
640
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
641
+ ip0 += repLength2;
642
+ anchor = ip0;
339
643
  continue;
340
644
  }
341
645
  break;
342
646
  }
343
647
  }
648
+
649
+ /* Prepare for next iteration */
650
+ assert(ip0 == anchor);
651
+ ip1 = ip0 + stepSize;
344
652
  }
345
653
 
654
+ _cleanup:
346
655
  /* save reps for next block */
347
- rep[0] = offset_1 ? offset_1 : offsetSaved;
348
- rep[1] = offset_2 ? offset_2 : offsetSaved;
656
+ rep[0] = offset_1;
657
+ rep[1] = offset_2;
349
658
 
350
659
  /* Return the last literals size */
351
660
  return (size_t)(iend - anchor);
352
661
  }
353
662
 
663
+
664
+ ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
665
+ ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
666
+ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
667
+ ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
668
+
354
669
  size_t ZSTD_compressBlock_fast_dictMatchState(
355
670
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
356
671
  void const* src, size_t srcSize)
@@ -361,30 +676,31 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
361
676
  {
362
677
  default: /* includes case 3 */
363
678
  case 4 :
364
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
679
+ return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
365
680
  case 5 :
366
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
681
+ return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
367
682
  case 6 :
368
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
683
+ return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
369
684
  case 7 :
370
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
685
+ return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
371
686
  }
372
687
  }
373
688
 
374
689
 
375
- static size_t ZSTD_compressBlock_fast_extDict_generic(
690
+ static
691
+ ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
692
+ size_t ZSTD_compressBlock_fast_extDict_generic(
376
693
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
377
- void const* src, size_t srcSize, U32 const mls)
694
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
378
695
  {
379
696
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
380
697
  U32* const hashTable = ms->hashTable;
381
698
  U32 const hlog = cParams->hashLog;
382
699
  /* support stepSize of 0 */
383
- U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
700
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
384
701
  const BYTE* const base = ms->window.base;
385
702
  const BYTE* const dictBase = ms->window.dictBase;
386
703
  const BYTE* const istart = (const BYTE*)src;
387
- const BYTE* ip = istart;
388
704
  const BYTE* anchor = istart;
389
705
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
390
706
  const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -397,100 +713,256 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
397
713
  const BYTE* const iend = istart + srcSize;
398
714
  const BYTE* const ilimit = iend - 8;
399
715
  U32 offset_1=rep[0], offset_2=rep[1];
716
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
717
+
718
+ const BYTE* ip0 = istart;
719
+ const BYTE* ip1;
720
+ const BYTE* ip2;
721
+ const BYTE* ip3;
722
+ U32 current0;
723
+
724
+
725
+ size_t hash0; /* hash for ip0 */
726
+ size_t hash1; /* hash for ip1 */
727
+ U32 idx; /* match idx for ip0 */
728
+ const BYTE* idxBase; /* base pointer for idx */
729
+
730
+ U32 offcode;
731
+ const BYTE* match0;
732
+ size_t mLength;
733
+ const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
734
+
735
+ size_t step;
736
+ const BYTE* nextStep;
737
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
738
+
739
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
400
740
 
401
741
  DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
402
742
 
403
743
  /* switch to "regular" variant if extDict is invalidated due to maxDistance */
404
744
  if (prefixStartIndex == dictStartIndex)
405
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
406
-
407
- /* Search Loop */
408
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
409
- const size_t h = ZSTD_hashPtr(ip, hlog, mls);
410
- const U32 matchIndex = hashTable[h];
411
- const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
412
- const BYTE* match = matchBase + matchIndex;
413
- const U32 curr = (U32)(ip-base);
414
- const U32 repIndex = curr + 1 - offset_1;
415
- const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
416
- const BYTE* const repMatch = repBase + repIndex;
417
- hashTable[h] = curr; /* update hash table */
418
- DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
419
- assert(offset_1 <= curr +1); /* check repIndex */
420
-
421
- if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
422
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
423
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
424
- size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
425
- ip++;
426
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
427
- ip += rLength;
428
- anchor = ip;
429
- } else {
430
- if ( (matchIndex < dictStartIndex) ||
431
- (MEM_read32(match) != MEM_read32(ip)) ) {
432
- assert(stepSize >= 1);
433
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
434
- continue;
745
+ return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
746
+
747
+ { U32 const curr = (U32)(ip0 - base);
748
+ U32 const maxRep = curr - dictStartIndex;
749
+ if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
750
+ if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
751
+ }
752
+
753
+ /* start each op */
754
+ _start: /* Requires: ip0 */
755
+
756
+ step = stepSize;
757
+ nextStep = ip0 + kStepIncr;
758
+
759
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
760
+ ip1 = ip0 + 1;
761
+ ip2 = ip0 + step;
762
+ ip3 = ip2 + 1;
763
+
764
+ if (ip3 >= ilimit) {
765
+ goto _cleanup;
766
+ }
767
+
768
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
769
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
770
+
771
+ idx = hashTable[hash0];
772
+ idxBase = idx < prefixStartIndex ? dictBase : base;
773
+
774
+ do {
775
+ { /* load repcode match for ip[2] */
776
+ U32 const current2 = (U32)(ip2 - base);
777
+ U32 const repIndex = current2 - offset_1;
778
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
779
+ U32 rval;
780
+ if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
781
+ & (offset_1 > 0) ) {
782
+ rval = MEM_read32(repBase + repIndex);
783
+ } else {
784
+ rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
435
785
  }
436
- { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
437
- const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
438
- U32 const offset = curr - matchIndex;
439
- size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
440
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
441
- offset_2 = offset_1; offset_1 = offset; /* update offset history */
442
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
443
- ip += mLength;
444
- anchor = ip;
786
+
787
+ /* write back hash table entry */
788
+ current0 = (U32)(ip0 - base);
789
+ hashTable[hash0] = current0;
790
+
791
+ /* check repcode at ip[2] */
792
+ if (MEM_read32(ip2) == rval) {
793
+ ip0 = ip2;
794
+ match0 = repBase + repIndex;
795
+ matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
796
+ assert((match0 != prefixStart) & (match0 != dictStart));
797
+ mLength = ip0[-1] == match0[-1];
798
+ ip0 -= mLength;
799
+ match0 -= mLength;
800
+ offcode = REPCODE1_TO_OFFBASE;
801
+ mLength += 4;
802
+ goto _match;
445
803
  } }
446
804
 
447
- if (ip <= ilimit) {
448
- /* Fill Table */
449
- hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
450
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
451
- /* check immediate repcode */
452
- while (ip <= ilimit) {
453
- U32 const current2 = (U32)(ip-base);
454
- U32 const repIndex2 = current2 - offset_2;
455
- const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
456
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
457
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
458
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
459
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
460
- { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
461
- ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
462
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
463
- ip += repLength2;
464
- anchor = ip;
465
- continue;
466
- }
467
- break;
468
- } } }
805
+ { /* load match for ip[0] */
806
+ U32 const mval = idx >= dictStartIndex ?
807
+ MEM_read32(idxBase + idx) :
808
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
809
+
810
+ /* check match at ip[0] */
811
+ if (MEM_read32(ip0) == mval) {
812
+ /* found a match! */
813
+ goto _offset;
814
+ } }
815
+
816
+ /* lookup ip[1] */
817
+ idx = hashTable[hash1];
818
+ idxBase = idx < prefixStartIndex ? dictBase : base;
819
+
820
+ /* hash ip[2] */
821
+ hash0 = hash1;
822
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
823
+
824
+ /* advance to next positions */
825
+ ip0 = ip1;
826
+ ip1 = ip2;
827
+ ip2 = ip3;
828
+
829
+ /* write back hash table entry */
830
+ current0 = (U32)(ip0 - base);
831
+ hashTable[hash0] = current0;
832
+
833
+ { /* load match for ip[0] */
834
+ U32 const mval = idx >= dictStartIndex ?
835
+ MEM_read32(idxBase + idx) :
836
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
837
+
838
+ /* check match at ip[0] */
839
+ if (MEM_read32(ip0) == mval) {
840
+ /* found a match! */
841
+ goto _offset;
842
+ } }
843
+
844
+ /* lookup ip[1] */
845
+ idx = hashTable[hash1];
846
+ idxBase = idx < prefixStartIndex ? dictBase : base;
847
+
848
+ /* hash ip[2] */
849
+ hash0 = hash1;
850
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
851
+
852
+ /* advance to next positions */
853
+ ip0 = ip1;
854
+ ip1 = ip2;
855
+ ip2 = ip0 + step;
856
+ ip3 = ip1 + step;
857
+
858
+ /* calculate step */
859
+ if (ip2 >= nextStep) {
860
+ step++;
861
+ PREFETCH_L1(ip1 + 64);
862
+ PREFETCH_L1(ip1 + 128);
863
+ nextStep += kStepIncr;
864
+ }
865
+ } while (ip3 < ilimit);
866
+
867
+ _cleanup:
868
+ /* Note that there are probably still a couple positions we could search.
869
+ * However, it seems to be a meaningful performance hit to try to search
870
+ * them. So let's not. */
871
+
872
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
873
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
874
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
469
875
 
470
876
  /* save reps for next block */
471
- rep[0] = offset_1;
472
- rep[1] = offset_2;
877
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
878
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
473
879
 
474
880
  /* Return the last literals size */
475
881
  return (size_t)(iend - anchor);
882
+
883
+ _offset: /* Requires: ip0, idx, idxBase */
884
+
885
+ /* Compute the offset code. */
886
+ { U32 const offset = current0 - idx;
887
+ const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
888
+ matchEnd = idx < prefixStartIndex ? dictEnd : iend;
889
+ match0 = idxBase + idx;
890
+ offset_2 = offset_1;
891
+ offset_1 = offset;
892
+ offcode = OFFSET_TO_OFFBASE(offset);
893
+ mLength = 4;
894
+
895
+ /* Count the backwards match length. */
896
+ while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
897
+ ip0--;
898
+ match0--;
899
+ mLength++;
900
+ } }
901
+
902
+ _match: /* Requires: ip0, match0, offcode, matchEnd */
903
+
904
+ /* Count the forward length. */
905
+ assert(matchEnd != 0);
906
+ mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
907
+
908
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
909
+
910
+ ip0 += mLength;
911
+ anchor = ip0;
912
+
913
+ /* write next hash table entry */
914
+ if (ip1 < ip0) {
915
+ hashTable[hash1] = (U32)(ip1 - base);
916
+ }
917
+
918
+ /* Fill table and check for immediate repcode. */
919
+ if (ip0 <= ilimit) {
920
+ /* Fill Table */
921
+ assert(base+current0+2 > istart); /* check base overflow */
922
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
923
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
924
+
925
+ while (ip0 <= ilimit) {
926
+ U32 const repIndex2 = (U32)(ip0-base) - offset_2;
927
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
928
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */
929
+ && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
930
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
931
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
932
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
933
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
934
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
935
+ ip0 += repLength2;
936
+ anchor = ip0;
937
+ continue;
938
+ }
939
+ break;
940
+ } }
941
+
942
+ goto _start;
476
943
  }
477
944
 
945
+ ZSTD_GEN_FAST_FN(extDict, 4, 0)
946
+ ZSTD_GEN_FAST_FN(extDict, 5, 0)
947
+ ZSTD_GEN_FAST_FN(extDict, 6, 0)
948
+ ZSTD_GEN_FAST_FN(extDict, 7, 0)
478
949
 
479
950
  size_t ZSTD_compressBlock_fast_extDict(
480
951
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
481
952
  void const* src, size_t srcSize)
482
953
  {
483
954
  U32 const mls = ms->cParams.minMatch;
955
+ assert(ms->dictMatchState == NULL);
484
956
  switch(mls)
485
957
  {
486
958
  default: /* includes case 3 */
487
959
  case 4 :
488
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
960
+ return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
489
961
  case 5 :
490
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
962
+ return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
491
963
  case 6 :
492
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
964
+ return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
493
965
  case 7 :
494
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
966
+ return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
495
967
  }
496
968
  }