extzstd 0.3.1 → 0.3.3

Files changed (113)
  1. checksums.yaml +4 -4
  2. data/README.md +28 -14
  3. data/contrib/zstd/CHANGELOG +301 -56
  4. data/contrib/zstd/CONTRIBUTING.md +169 -72
  5. data/contrib/zstd/LICENSE +4 -4
  6. data/contrib/zstd/Makefile +116 -87
  7. data/contrib/zstd/Package.swift +36 -0
  8. data/contrib/zstd/README.md +62 -32
  9. data/contrib/zstd/TESTING.md +2 -3
  10. data/contrib/zstd/appveyor.yml +52 -136
  11. data/contrib/zstd/lib/BUCK +5 -7
  12. data/contrib/zstd/lib/Makefile +225 -222
  13. data/contrib/zstd/lib/README.md +51 -6
  14. data/contrib/zstd/lib/common/allocations.h +55 -0
  15. data/contrib/zstd/lib/common/bits.h +200 -0
  16. data/contrib/zstd/lib/common/bitstream.h +45 -62
  17. data/contrib/zstd/lib/common/compiler.h +205 -22
  18. data/contrib/zstd/lib/common/cpu.h +1 -3
  19. data/contrib/zstd/lib/common/debug.c +1 -1
  20. data/contrib/zstd/lib/common/debug.h +12 -19
  21. data/contrib/zstd/lib/common/entropy_common.c +172 -48
  22. data/contrib/zstd/lib/common/error_private.c +10 -2
  23. data/contrib/zstd/lib/common/error_private.h +82 -3
  24. data/contrib/zstd/lib/common/fse.h +37 -86
  25. data/contrib/zstd/lib/common/fse_decompress.c +117 -92
  26. data/contrib/zstd/lib/common/huf.h +99 -166
  27. data/contrib/zstd/lib/common/mem.h +124 -142
  28. data/contrib/zstd/lib/common/pool.c +54 -27
  29. data/contrib/zstd/lib/common/pool.h +10 -4
  30. data/contrib/zstd/lib/common/portability_macros.h +156 -0
  31. data/contrib/zstd/lib/common/threading.c +74 -19
  32. data/contrib/zstd/lib/common/threading.h +5 -10
  33. data/contrib/zstd/lib/common/xxhash.c +7 -847
  34. data/contrib/zstd/lib/common/xxhash.h +5568 -167
  35. data/contrib/zstd/lib/common/zstd_common.c +2 -37
  36. data/contrib/zstd/lib/common/zstd_deps.h +111 -0
  37. data/contrib/zstd/lib/common/zstd_internal.h +132 -187
  38. data/contrib/zstd/lib/common/zstd_trace.h +163 -0
  39. data/contrib/zstd/lib/compress/clevels.h +134 -0
  40. data/contrib/zstd/lib/compress/fse_compress.c +83 -157
  41. data/contrib/zstd/lib/compress/hist.c +27 -29
  42. data/contrib/zstd/lib/compress/hist.h +2 -2
  43. data/contrib/zstd/lib/compress/huf_compress.c +916 -279
  44. data/contrib/zstd/lib/compress/zstd_compress.c +3773 -1019
  45. data/contrib/zstd/lib/compress/zstd_compress_internal.h +610 -203
  46. data/contrib/zstd/lib/compress/zstd_compress_literals.c +119 -42
  47. data/contrib/zstd/lib/compress/zstd_compress_literals.h +16 -6
  48. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +42 -19
  49. data/contrib/zstd/lib/compress/zstd_compress_sequences.h +1 -1
  50. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +49 -317
  51. data/contrib/zstd/lib/compress/zstd_compress_superblock.h +1 -1
  52. data/contrib/zstd/lib/compress/zstd_cwksp.h +320 -103
  53. data/contrib/zstd/lib/compress/zstd_double_fast.c +388 -151
  54. data/contrib/zstd/lib/compress/zstd_double_fast.h +3 -2
  55. data/contrib/zstd/lib/compress/zstd_fast.c +729 -265
  56. data/contrib/zstd/lib/compress/zstd_fast.h +3 -2
  57. data/contrib/zstd/lib/compress/zstd_lazy.c +1270 -251
  58. data/contrib/zstd/lib/compress/zstd_lazy.h +61 -1
  59. data/contrib/zstd/lib/compress/zstd_ldm.c +324 -219
  60. data/contrib/zstd/lib/compress/zstd_ldm.h +9 -2
  61. data/contrib/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
  62. data/contrib/zstd/lib/compress/zstd_opt.c +481 -209
  63. data/contrib/zstd/lib/compress/zstd_opt.h +1 -1
  64. data/contrib/zstd/lib/compress/zstdmt_compress.c +181 -457
  65. data/contrib/zstd/lib/compress/zstdmt_compress.h +34 -113
  66. data/contrib/zstd/lib/decompress/huf_decompress.c +1199 -565
  67. data/contrib/zstd/lib/decompress/huf_decompress_amd64.S +576 -0
  68. data/contrib/zstd/lib/decompress/zstd_ddict.c +12 -12
  69. data/contrib/zstd/lib/decompress/zstd_ddict.h +2 -2
  70. data/contrib/zstd/lib/decompress/zstd_decompress.c +627 -157
  71. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +1086 -326
  72. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +19 -5
  73. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +62 -13
  74. data/contrib/zstd/lib/deprecated/zbuff.h +1 -1
  75. data/contrib/zstd/lib/deprecated/zbuff_common.c +1 -1
  76. data/contrib/zstd/lib/deprecated/zbuff_compress.c +24 -4
  77. data/contrib/zstd/lib/deprecated/zbuff_decompress.c +3 -1
  78. data/contrib/zstd/lib/dictBuilder/cover.c +73 -52
  79. data/contrib/zstd/lib/dictBuilder/cover.h +7 -6
  80. data/contrib/zstd/lib/dictBuilder/divsufsort.c +1 -1
  81. data/contrib/zstd/lib/dictBuilder/fastcover.c +44 -35
  82. data/contrib/zstd/lib/dictBuilder/zdict.c +103 -111
  83. data/contrib/zstd/lib/legacy/zstd_legacy.h +8 -1
  84. data/contrib/zstd/lib/legacy/zstd_v01.c +21 -54
  85. data/contrib/zstd/lib/legacy/zstd_v01.h +1 -1
  86. data/contrib/zstd/lib/legacy/zstd_v02.c +29 -70
  87. data/contrib/zstd/lib/legacy/zstd_v02.h +1 -1
  88. data/contrib/zstd/lib/legacy/zstd_v03.c +30 -73
  89. data/contrib/zstd/lib/legacy/zstd_v03.h +1 -1
  90. data/contrib/zstd/lib/legacy/zstd_v04.c +29 -71
  91. data/contrib/zstd/lib/legacy/zstd_v04.h +1 -1
  92. data/contrib/zstd/lib/legacy/zstd_v05.c +40 -86
  93. data/contrib/zstd/lib/legacy/zstd_v05.h +1 -1
  94. data/contrib/zstd/lib/legacy/zstd_v06.c +47 -88
  95. data/contrib/zstd/lib/legacy/zstd_v06.h +1 -1
  96. data/contrib/zstd/lib/legacy/zstd_v07.c +40 -83
  97. data/contrib/zstd/lib/legacy/zstd_v07.h +1 -1
  98. data/contrib/zstd/lib/libzstd.mk +214 -0
  99. data/contrib/zstd/lib/libzstd.pc.in +7 -6
  100. data/contrib/zstd/lib/module.modulemap +35 -0
  101. data/contrib/zstd/lib/{dictBuilder/zdict.h → zdict.h} +203 -34
  102. data/contrib/zstd/lib/zstd.h +1217 -287
  103. data/contrib/zstd/lib/{common/zstd_errors.h → zstd_errors.h} +28 -8
  104. data/ext/extconf.rb +7 -6
  105. data/ext/extzstd.c +19 -10
  106. data/ext/extzstd.h +6 -0
  107. data/ext/libzstd_conf.h +0 -1
  108. data/ext/zstd_decompress_asm.S +1 -0
  109. data/gemstub.rb +3 -21
  110. data/lib/extzstd/version.rb +6 -1
  111. data/lib/extzstd.rb +0 -2
  112. data/test/test_basic.rb +0 -5
  113. metadata +18 -6
data/contrib/zstd/lib/compress/zstd_fast.c
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,42 @@
  #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
  #include "zstd_fast.h"
 
+ static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
+                         const void* const end,
+                         ZSTD_dictTableLoadMethod_e dtlm)
+ {
+     const ZSTD_compressionParameters* const cParams = &ms->cParams;
+     U32* const hashTable = ms->hashTable;
+     U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+     U32 const mls = cParams->minMatch;
+     const BYTE* const base = ms->window.base;
+     const BYTE* ip = base + ms->nextToUpdate;
+     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+     const U32 fastHashFillStep = 3;
 
- void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+     /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
+      * Feel free to remove this assert if there's a good reason! */
+     assert(dtlm == ZSTD_dtlm_full);
+
+     /* Always insert every fastHashFillStep position into the hash table.
+      * Insert the other positions if their hash entry is empty.
+      */
+     for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+         U32 const curr = (U32)(ip - base);
+         { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
+           ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
+
+         if (dtlm == ZSTD_dtlm_fast) continue;
+         /* Only load extra positions for ZSTD_dtlm_full */
+         { U32 p;
+           for (p = 1; p < fastHashFillStep; ++p) {
+               size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
+               if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
+                   ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
+         } } } }
+ }
+
+ static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                          const void* const end,
                          ZSTD_dictTableLoadMethod_e dtlm)
  {
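The new ZSTD_fillHashTableForCDict above no longer stores a bare index per hash slot: it packs a short tag derived from the hash next to the match index (ZSTD_SHORT_CACHE_TAG_BITS), so that later dictionary probes can reject most candidates without touching dictionary memory. Here is a minimal standalone sketch of that packing, assuming an 8-bit tag; the helper names below are invented for illustration and are not the library's API:

    #include <stdint.h>
    #include <assert.h>

    #define TAG_BITS 8                          /* assumed tag width */
    #define TAG_MASK ((1u << TAG_BITS) - 1u)

    /* hashAndTag: the high bits select the bucket, the low TAG_BITS act as a
     * short checksum of the hashed bytes. */
    static void writeTaggedIndex(uint32_t* table, size_t hashAndTag, uint32_t index)
    {
        size_t const bucket = hashAndTag >> TAG_BITS;
        uint32_t const tag = (uint32_t)hashAndTag & TAG_MASK;
        assert((index >> (32 - TAG_BITS)) == 0);   /* index must fit in the remaining 24 bits */
        table[bucket] = (index << TAG_BITS) | tag;
    }

    /* Cheap pre-filter: only on a tag hit is the candidate position loaded
     * and its bytes actually compared. */
    static int tagsMatch(uint32_t packedEntry, size_t hashAndTag)
    {
        return (packedEntry & TAG_MASK) == ((uint32_t)hashAndTag & TAG_MASK);
    }

The trade-off is that the stored index loses TAG_BITS of range, which is why the write asserts that the index fits in the remaining bits.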
@@ -25,163 +59,365 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
      const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
      const U32 fastHashFillStep = 3;
 
+     /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
+      * Feel free to remove this assert if there's a good reason! */
+     assert(dtlm == ZSTD_dtlm_fast);
+
      /* Always insert every fastHashFillStep position into the hash table.
       * Insert the other positions if their hash entry is empty.
       */
      for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
-         U32 const current = (U32)(ip - base);
+         U32 const curr = (U32)(ip - base);
          size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
-         hashTable[hash0] = current;
+         hashTable[hash0] = curr;
          if (dtlm == ZSTD_dtlm_fast) continue;
          /* Only load extra positions for ZSTD_dtlm_full */
          { U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) { /* not yet filled */
-                   hashTable[hash] = current + p;
+                   hashTable[hash] = curr + p;
          } } } }
      }
 
+ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                         const void* const end,
+                         ZSTD_dictTableLoadMethod_e dtlm,
+                         ZSTD_tableFillPurpose_e tfp)
+ {
+     if (tfp == ZSTD_tfp_forCDict) {
+         ZSTD_fillHashTableForCDict(ms, end, dtlm);
+     } else {
+         ZSTD_fillHashTableForCCtx(ms, end, dtlm);
+     }
+ }
+
 
+ /**
+  * If you squint hard enough (and ignore repcodes), the search operation at any
+  * given position is broken into 4 stages:
+  *
+  * 1. Hash   (map position to hash value via input read)
+  * 2. Lookup (map hash val to index via hashtable read)
+  * 3. Load   (map index to value at that position via input read)
+  * 4. Compare
+  *
+  * Each of these steps involves a memory read at an address which is computed
+  * from the previous step. This means these steps must be sequenced and their
+  * latencies are cumulative.
+  *
+  * Rather than do 1->2->3->4 sequentially for a single position before moving
+  * onto the next, this implementation interleaves these operations across the
+  * next few positions:
+  *
+  * R = Repcode Read & Compare
+  * H = Hash
+  * T = Table Lookup
+  * M = Match Read & Compare
+  *
+  *  Pos | Time -->
+  * -----+-------------------
+  *  N   | ... M
+  *  N+1 | ...   TM
+  *  N+2 | R H   T M
+  *  N+3 |   H       TM
+  *  N+4 |     R H   T M
+  *  N+5 |       H   ...
+  *  N+6 |         R ...
+  *
+  * This is very much analogous to the pipelining of execution in a CPU. And just
+  * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+  * branch).
+  *
+  * When this happens, we throw away our current state, and do the following prep
+  * to re-enter the loop:
+  *
+  *  Pos | Time -->
+  * -----+-------------------
+  *  N   | H T
+  *  N+1 |   H
+  *
+  * This is also the work we do at the beginning to enter the loop initially.
+  */
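To make the schedule above concrete, here is a compilable sketch of the same pipelining idea on a drastically simplified matcher (no repcodes, no step acceleration, no dictionary). Every name in it (read32, hashPtr, findFirstMatch) is a stand-in invented for this sketch rather than zstd's API; note how the `^ 1` sentinel mirrors the "guaranteed to not match" trick used in the real loop below to avoid branching on an invalid index:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static uint32_t read32(const uint8_t* p) { uint32_t v; memcpy(&v, p, sizeof v); return v; }

    static size_t hashPtr(const uint8_t* p, unsigned hlog)
    {
        return (size_t)((read32(p) * 2654435761u) >> (32 - hlog));   /* Fibonacci hashing */
    }

    /* Returns the first position in [ip, ilimit) whose 4 leading bytes equal those
     * at an earlier table entry, or NULL. ilimit must leave a 4-byte read margin. */
    static const uint8_t* findFirstMatch(uint32_t* table, unsigned hlog,
                                         const uint8_t* base, const uint8_t* ip,
                                         const uint8_t* ilimit)
    {
        size_t h0 = hashPtr(ip, hlog);            /* stage 1 for ip     */
        size_t h1 = hashPtr(ip + 1, hlog);        /* stage 1 for ip + 1 */
        uint32_t idx = table[h0];                 /* stage 2 for ip     */
        while (ip + 2 < ilimit) {
            /* stage 3: an empty slot (idx == 0) loads a value that cannot
             * match (read32(ip) ^ 1), so no validity branch is needed */
            uint32_t const mval = idx ? read32(base + idx) : read32(ip) ^ 1;
            table[h0] = (uint32_t)(ip - base);    /* write back entry for ip */
            if (read32(ip) == mval)               /* stage 4: compare */
                return ip;                        /* match: "dump the pipeline" */
            idx = table[h1];                      /* stage 2 for ip + 1, hash already done */
            h0 = h1;
            h1 = hashPtr(ip + 2, hlog);           /* stage 1 for ip + 2 */
            ip += 1;
        }
        return NULL;
    }

While the compare for ip executes, the table load for ip + 1 and the hash for ip + 2 are already in flight, so the four dependent reads overlap across positions instead of serializing.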
  FORCE_INLINE_TEMPLATE size_t
- ZSTD_compressBlock_fast_generic(
+ ZSTD_compressBlock_fast_noDict_generic(
          ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
          void const* src, size_t srcSize,
-         U32 const mls)
+         U32 const mls, U32 const hasStep)
  {
      const ZSTD_compressionParameters* const cParams = &ms->cParams;
      U32* const hashTable = ms->hashTable;
      U32 const hlog = cParams->hashLog;
      /* support stepSize of 0 */
-     size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+     size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
      const BYTE* const base = ms->window.base;
      const BYTE* const istart = (const BYTE*)src;
-     /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
-     const BYTE* ip0 = istart;
-     const BYTE* ip1;
-     const BYTE* anchor = istart;
      const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
      const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
      const BYTE* const prefixStart = base + prefixStartIndex;
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - HASH_READ_SIZE;
-     U32 offset_1=rep[0], offset_2=rep[1];
-     U32 offsetSaved = 0;
 
-     /* init */
+     const BYTE* anchor = istart;
+     const BYTE* ip0 = istart;
+     const BYTE* ip1;
+     const BYTE* ip2;
+     const BYTE* ip3;
+     U32 current0;
+
+     U32 rep_offset1 = rep[0];
+     U32 rep_offset2 = rep[1];
+     U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+     size_t hash0; /* hash for ip0 */
+     size_t hash1; /* hash for ip1 */
+     U32 idx; /* match idx for ip0 */
+     U32 mval; /* src value at match idx */
+
+     U32 offcode;
+     const BYTE* match0;
+     size_t mLength;
+
+     /* ip0 and ip1 are always adjacent. The targetLength skipping and
+      * uncompressibility acceleration is applied to every other position,
+      * matching the behavior of #1562. step therefore represents the gap
+      * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+     size_t step;
+     const BYTE* nextStep;
+     const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
      DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
      ip0 += (ip0 == prefixStart);
+     { U32 const curr = (U32)(ip0 - base);
+       U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+       U32 const maxRep = curr - windowLow;
+       if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
+       if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
+     }
+
+     /* start each op */
+ _start: /* Requires: ip0 */
+
+     step = stepSize;
+     nextStep = ip0 + kStepIncr;
+
+     /* calculate positions, ip0 - anchor == 0, so we skip step calc */
      ip1 = ip0 + 1;
-     { U32 const current = (U32)(ip0 - base);
-       U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
-       U32 const maxRep = current - windowLow;
-       if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-       if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+     ip2 = ip0 + step;
+     ip3 = ip2 + 1;
+
+     if (ip3 >= ilimit) {
+         goto _cleanup;
      }
 
-     /* Main Search Loop */
-     #ifdef __INTEL_COMPILER
-     /* From intel 'The vector pragma indicates that the loop should be
-      * vectorized if it is legal to do so'. Can be used together with
-      * #pragma ivdep (but have opted to exclude that because intel
-      * warns against using it).*/
-     #pragma vector always
-     #endif
-     while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
-         size_t mLength;
-         BYTE const* ip2 = ip0 + 2;
-         size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
-         U32 const val0 = MEM_read32(ip0);
-         size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
-         U32 const val1 = MEM_read32(ip1);
-         U32 const current0 = (U32)(ip0-base);
-         U32 const current1 = (U32)(ip1-base);
-         U32 const matchIndex0 = hashTable[h0];
-         U32 const matchIndex1 = hashTable[h1];
-         BYTE const* repMatch = ip2 - offset_1;
-         const BYTE* match0 = base + matchIndex0;
-         const BYTE* match1 = base + matchIndex1;
-         U32 offcode;
-
-     #if defined(__aarch64__)
-         PREFETCH_L1(ip0+256);
-     #endif
-
-         hashTable[h0] = current0;   /* update hash table */
-         hashTable[h1] = current1;   /* update hash table */
-
-         assert(ip0 + 1 == ip1);
-
-         if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
-             mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
-             ip0 = ip2 - mLength;
-             match0 = repMatch - mLength;
+     hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+     hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+     idx = hashTable[hash0];
+
+     do {
+         /* load repcode match for ip[2]*/
+         const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+         /* write back hash table entry */
+         current0 = (U32)(ip0 - base);
+         hashTable[hash0] = current0;
+
+         /* check repcode at ip[2] */
+         if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+             ip0 = ip2;
+             match0 = ip0 - rep_offset1;
+             mLength = ip0[-1] == match0[-1];
+             ip0 -= mLength;
+             match0 -= mLength;
+             offcode = REPCODE1_TO_OFFBASE;
              mLength += 4;
-             offcode = 0;
+
+             /* First write next hash table entry; we've already calculated it.
+              * This write is known to be safe because the ip1 is before the
+              * repcode (ip2). */
+             hashTable[hash1] = (U32)(ip1 - base);
+
              goto _match;
          }
-         if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
-             /* found a regular match */
-             goto _offset;
+
+         /* load match for ip[0] */
+         if (idx >= prefixStartIndex) {
+             mval = MEM_read32(base + idx);
+         } else {
+             mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
          }
-         if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
-             /* found a regular match after one literal */
-             ip0 = ip1;
-             match0 = match1;
+
+         /* check match at ip[0] */
+         if (MEM_read32(ip0) == mval) {
+             /* found a match! */
+
+             /* First write next hash table entry; we've already calculated it.
+              * This write is known to be safe because the ip1 == ip0 + 1, so
+              * we know we will resume searching after ip1 */
+             hashTable[hash1] = (U32)(ip1 - base);
+
              goto _offset;
          }
-         { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
-           assert(step >= 2);
-           ip0 += step;
-           ip1 += step;
-           continue;
+
+         /* lookup ip[1] */
+         idx = hashTable[hash1];
+
+         /* hash ip[2] */
+         hash0 = hash1;
+         hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+         /* advance to next positions */
+         ip0 = ip1;
+         ip1 = ip2;
+         ip2 = ip3;
+
+         /* write back hash table entry */
+         current0 = (U32)(ip0 - base);
+         hashTable[hash0] = current0;
+
+         /* load match for ip[0] */
+         if (idx >= prefixStartIndex) {
+             mval = MEM_read32(base + idx);
+         } else {
+             mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
          }
-     _offset: /* Requires: ip0, match0 */
-         /* Compute the offset code */
-         offset_2 = offset_1;
-         offset_1 = (U32)(ip0-match0);
-         offcode = offset_1 + ZSTD_REP_MOVE;
-         mLength = 4;
-         /* Count the backwards match length */
-         while (((ip0>anchor) & (match0>prefixStart))
-              && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
 
-     _match: /* Requires: ip0, match0, offcode */
-         /* Count the forward length */
-         mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
-         ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
-         /* match found */
-         ip0 += mLength;
-         anchor = ip0;
+         /* check match at ip[0] */
+         if (MEM_read32(ip0) == mval) {
+             /* found a match! */
+
+             /* first write next hash table entry; we've already calculated it */
+             if (step <= 4) {
+                 /* We need to avoid writing an index into the hash table >= the
+                  * position at which we will pick up our searching after we've
+                  * taken this match.
+                  *
+                  * The minimum possible match has length 4, so the earliest ip0
+                  * can be after we take this match will be the current ip0 + 4.
+                  * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
+                  * write this position.
+                  */
+                 hashTable[hash1] = (U32)(ip1 - base);
+             }
 
-         if (ip0 <= ilimit) {
-             /* Fill Table */
-             assert(base+current0+2 > istart);  /* check base overflow */
-             hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
-             hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+             goto _offset;
+         }
 
-             if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
-                 while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
-                     /* store sequence */
-                     size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
-                     { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
-                     hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
-                     ip0 += rLength;
-                     ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
-                     anchor = ip0;
-                     continue;   /* faster when present (confirmed on gcc-8) ... (?) */
-             } } }
-         ip1 = ip0 + 1;
-     }
+         /* lookup ip[1] */
+         idx = hashTable[hash1];
+
+         /* hash ip[2] */
+         hash0 = hash1;
+         hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+         /* advance to next positions */
+         ip0 = ip1;
+         ip1 = ip2;
+         ip2 = ip0 + step;
+         ip3 = ip1 + step;
+
+         /* calculate step */
+         if (ip2 >= nextStep) {
+             step++;
+             PREFETCH_L1(ip1 + 64);
+             PREFETCH_L1(ip1 + 128);
+             nextStep += kStepIncr;
+         }
+     } while (ip3 < ilimit);
+
+ _cleanup:
+     /* Note that there are probably still a couple positions we could search.
+      * However, it seems to be a meaningful performance hit to try to search
+      * them. So let's not. */
+
+     /* When the repcodes are outside of the prefix, we set them to zero before the loop.
+      * When the offsets are still zero, we need to restore them after the block to have a correct
+      * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
+      * offsets were invalid. We need to figure out which offset to refill with.
+      *     - If both offsets are zero they are in the same order.
+      *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
+      *     - If only one is zero, we need to decide which offset to restore.
+      *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
+      *         - It is impossible for rep_offset2 to be non-zero.
+      *
+      * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
+      * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
+      */
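A short worked example of that rule, with hypothetical values: suppose the block enters with rep = {6, 5} and both offsets exceed maxRep, so offsetSaved1 = 6, offsetSaved2 = 5, and rep_offset1 = rep_offset2 = 0. If the loop then finds exactly one regular match at offset 9, rep_offset2 takes the old rep_offset1 (still 0) and rep_offset1 becomes 9. On exit, offsetSaved1 != 0 and rep_offset1 != 0, so offsetSaved2 is replaced by offsetSaved1, and the saved history below becomes rep[0] = 9, rep[1] = 6 rather than {9, 5}.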
+     offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
 
      /* save reps for next block */
-     rep[0] = offset_1 ? offset_1 : offsetSaved;
-     rep[1] = offset_2 ? offset_2 : offsetSaved;
+     rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
+     rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
 
      /* Return the last literals size */
      return (size_t)(iend - anchor);
+
+ _offset: /* Requires: ip0, idx */
+
+     /* Compute the offset code. */
+     match0 = base + idx;
+     rep_offset2 = rep_offset1;
+     rep_offset1 = (U32)(ip0-match0);
+     offcode = OFFSET_TO_OFFBASE(rep_offset1);
+     mLength = 4;
+
+     /* Count the backwards match length. */
+     while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+         ip0--;
+         match0--;
+         mLength++;
+     }
+
+ _match: /* Requires: ip0, match0, offcode */
+
+     /* Count the forward length. */
+     mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
+
+     ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+     ip0 += mLength;
+     anchor = ip0;
+
+     /* Fill table and check for immediate repcode. */
+     if (ip0 <= ilimit) {
+         /* Fill Table */
+         assert(base+current0+2 > istart);  /* check base overflow */
+         hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+         hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+         if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+             while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
+                 /* store sequence */
+                 size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+                 { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
+                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+                 ip0 += rLength;
+                 ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
+                 anchor = ip0;
+                 continue;   /* faster when present (confirmed on gcc-8) ... (?) */
+     } } }
+
+     goto _start;
  }
 
+ #define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
+     static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
+             ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+             void const* src, size_t srcSize) \
+     { \
+         return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+     }
+
+ ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ ZSTD_GEN_FAST_FN(noDict, 7, 1)
+
+ ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ ZSTD_GEN_FAST_FN(noDict, 7, 0)
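For reference, each ZSTD_GEN_FAST_FN(dictMode, mls, step) line above stamps out one specialized wrapper; expanding the first instantiation by hand gives:

    static size_t ZSTD_compressBlock_fast_noDict_4_1(
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            void const* src, size_t srcSize)
    {
        return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1);
    }

Because mls and the step flag arrive as compile-time constants, the FORCE_INLINE_TEMPLATE body can be specialized per variant instead of branching on them inside the hot loop.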
 
  size_t ZSTD_compressBlock_fast(
      ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -189,24 +425,40 @@ size_t ZSTD_compressBlock_fast(
  {
      U32 const mls = ms->cParams.minMatch;
      assert(ms->dictMatchState == NULL);
-     switch(mls)
-     {
-     default: /* includes case 3 */
-     case 4 :
-         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
-     case 5 :
-         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
-     case 6 :
-         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
-     case 7 :
-         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+     if (ms->cParams.targetLength > 1) {
+         switch(mls)
+         {
+         default: /* includes case 3 */
+         case 4 :
+             return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
+         case 5 :
+             return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
+         case 6 :
+             return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
+         case 7 :
+             return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+         }
+     } else {
+         switch(mls)
+         {
+         default: /* includes case 3 */
+         case 4 :
+             return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+         case 5 :
+             return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+         case 6 :
+             return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+         case 7 :
+             return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+         }
+
      }
  }
 
  FORCE_INLINE_TEMPLATE
  size_t ZSTD_compressBlock_fast_dictMatchState_generic(
      ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-     void const* src, size_t srcSize, U32 const mls)
+     void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
  {
      const ZSTD_compressionParameters* const cParams = &ms->cParams;
      U32* const hashTable = ms->hashTable;
@@ -215,14 +467,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
      U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
      const BYTE* const base = ms->window.base;
      const BYTE* const istart = (const BYTE*)src;
-     const BYTE* ip = istart;
+     const BYTE* ip0 = istart;
+     const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
      const BYTE* anchor = istart;
      const U32 prefixStartIndex = ms->window.dictLimit;
      const BYTE* const prefixStart = base + prefixStartIndex;
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - HASH_READ_SIZE;
      U32 offset_1=rep[0], offset_2=rep[1];
-     U32 offsetSaved = 0;
 
      const ZSTD_matchState_t* const dms = ms->dictMatchState;
      const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
@@ -232,125 +484,182 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
      const BYTE* const dictStart = dictBase + dictStartIndex;
      const BYTE* const dictEnd = dms->window.nextSrc;
      const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
-     const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
-     const U32 dictHLog = dictCParams->hashLog;
+     const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
+     const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
 
      /* if a dictionary is still attached, it necessarily means that
       * it is within window size. So we just check it. */
      const U32 maxDistance = 1U << cParams->windowLog;
-     const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+     const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
      assert(endIndex - prefixStartIndex <= maxDistance);
      (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
 
-     /* ensure there will be no no underflow
+     (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+     /* ensure there will be no underflow
       * when translating a dict index into a local index */
      assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
 
+     if (ms->prefetchCDictTables) {
+         size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+         PREFETCH_AREA(dictHashTable, hashTableBytes)
+     }
+
      /* init */
      DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
-     ip += (dictAndPrefixLength == 0);
+     ip0 += (dictAndPrefixLength == 0);
      /* dictMatchState repCode checks don't currently handle repCode == 0
       * disabling. */
      assert(offset_1 <= dictAndPrefixLength);
      assert(offset_2 <= dictAndPrefixLength);
 
-     /* Main Search Loop */
-     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+     /* Outer search loop */
+     assert(stepSize >= 1);
+     while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
          size_t mLength;
-         size_t const h = ZSTD_hashPtr(ip, hlog, mls);
-         U32 const current = (U32)(ip-base);
-         U32 const matchIndex = hashTable[h];
-         const BYTE* match = base + matchIndex;
-         const U32 repIndex = current + 1 - offset_1;
-         const BYTE* repMatch = (repIndex < prefixStartIndex) ?
-                                dictBase + (repIndex - dictIndexDelta) :
-                                base + repIndex;
-         hashTable[h] = current;   /* update hash table */
-
-         if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
-           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
-             ip++;
-             ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
-         } else if ( (matchIndex <= prefixStartIndex) ) {
-             size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
-             U32 const dictMatchIndex = dictHashTable[dictHash];
-             const BYTE* dictMatch = dictBase + dictMatchIndex;
-             if (dictMatchIndex <= dictStartIndex ||
-                 MEM_read32(dictMatch) != MEM_read32(ip)) {
-                 assert(stepSize >= 1);
-                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                 continue;
-             } else {
-                 /* found a dict match */
-                 U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
-                 mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
-                 while (((ip>anchor) & (dictMatch>dictStart))
-                      && (ip[-1] == dictMatch[-1])) {
-                     ip--; dictMatch--; mLength++;
+         size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+
+         size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
+         U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
+         int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
+
+         U32 matchIndex = hashTable[hash0];
+         U32 curr = (U32)(ip0 - base);
+         size_t step = stepSize;
+         const size_t kStepIncr = 1 << kSearchStrength;
+         const BYTE* nextStep = ip0 + kStepIncr;
+
+         /* Inner search loop */
+         while (1) {
+             const BYTE* match = base + matchIndex;
+             const U32 repIndex = curr + 1 - offset_1;
+             const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+                                    dictBase + (repIndex - dictIndexDelta) :
+                                    base + repIndex;
+             const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+             size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
+             hashTable[hash0] = curr;   /* update hash table */
+
+             if (((U32) ((prefixStartIndex - 1) - repIndex) >=
+                  3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+                 && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
+                 const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+                 mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
+                 ip0++;
+                 ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
+                 break;
+             }
+
+             if (dictTagsMatch) {
+                 /* Found a possible dict match */
+                 const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+                 const BYTE* dictMatch = dictBase + dictMatchIndex;
+                 if (dictMatchIndex > dictStartIndex &&
+                     MEM_read32(dictMatch) == MEM_read32(ip0)) {
+                     /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
+                     if (matchIndex <= prefixStartIndex) {
+                         U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
+                         mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
+                         while (((ip0 > anchor) & (dictMatch > dictStart))
+                              && (ip0[-1] == dictMatch[-1])) {
+                             ip0--;
+                             dictMatch--;
+                             mLength++;
+                         } /* catch up */
+                         offset_2 = offset_1;
+                         offset_1 = offset;
+                         ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+                         break;
+                     }
+                 }
+             }
+
+             if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
+                 /* found a regular match */
+                 U32 const offset = (U32) (ip0 - match);
+                 mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
+                 while (((ip0 > anchor) & (match > prefixStart))
+                      && (ip0[-1] == match[-1])) {
+                     ip0--;
+                     match--;
+                     mLength++;
                  } /* catch up */
                  offset_2 = offset_1;
                  offset_1 = offset;
-                 ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                 ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+                 break;
              }
-         } else if (MEM_read32(match) != MEM_read32(ip)) {
-             /* it's not a match, and we're not going to check the dictionary */
-             assert(stepSize >= 1);
-             ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-             continue;
-         } else {
-             /* found a regular match */
-             U32 const offset = (U32)(ip-match);
-             mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-             while (((ip>anchor) & (match>prefixStart))
-                  && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-             offset_2 = offset_1;
-             offset_1 = offset;
-             ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-         }
+
+             /* Prepare for next iteration */
+             dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
+             dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
+             matchIndex = hashTable[hash1];
+
+             if (ip1 >= nextStep) {
+                 step++;
+                 nextStep += kStepIncr;
+             }
+             ip0 = ip1;
+             ip1 = ip1 + step;
+             if (ip1 > ilimit) goto _cleanup;
+
+             curr = (U32)(ip0 - base);
+             hash0 = hash1;
+         }   /* end inner search loop */
 
          /* match found */
-         ip += mLength;
-         anchor = ip;
+         assert(mLength);
+         ip0 += mLength;
+         anchor = ip0;
 
-         if (ip <= ilimit) {
+         if (ip0 <= ilimit) {
              /* Fill Table */
-             assert(base+current+2 > istart);  /* check base overflow */
-             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+             assert(base+curr+2 > istart);  /* check base overflow */
+             hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
+             hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
 
              /* check immediate repcode */
-             while (ip <= ilimit) {
-                 U32 const current2 = (U32)(ip-base);
+             while (ip0 <= ilimit) {
+                 U32 const current2 = (U32)(ip0-base);
                  U32 const repIndex2 = current2 - offset_2;
                  const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                          dictBase - dictIndexDelta + repIndex2 :
                          base + repIndex2;
                  if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                      const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                     size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                      U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                     ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
-                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                     ip += repLength2;
-                     anchor = ip;
+                     ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+                     hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
+                     ip0 += repLength2;
+                     anchor = ip0;
                      continue;
                  }
                  break;
             }
          }
+
+         /* Prepare for next iteration */
+         assert(ip0 == anchor);
+         ip1 = ip0 + stepSize;
      }
 
+ _cleanup:
      /* save reps for next block */
-     rep[0] = offset_1 ? offset_1 : offsetSaved;
-     rep[1] = offset_2 ? offset_2 : offsetSaved;
+     rep[0] = offset_1;
+     rep[1] = offset_2;
 
      /* Return the last literals size */
      return (size_t)(iend - anchor);
  }
 
+
+ ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
  size_t ZSTD_compressBlock_fast_dictMatchState(
      ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
      void const* src, size_t srcSize)
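The prefetchCDictTables path above warms the dictionary's hash table before the search loop starts. A minimal sketch of the idea, assuming 64-byte cache lines and the GCC/Clang prefetch builtin; zstd wraps the real thing in its own PREFETCH_AREA macro, whose definition is not part of this diff:

    #include <stddef.h>

    /* Touch each cache line of [p, p + len) so the upcoming random accesses
     * into the table are more likely to hit in cache. */
    static void prefetchArea(const void* p, size_t len)
    {
        const char* const cp = (const char*)p;
        size_t i;
        for (i = 0; i < len; i += 64) {            /* assume 64-byte cache lines */
    #if defined(__GNUC__) || defined(__clang__)
            __builtin_prefetch(cp + i, 0, 3);      /* read, high temporal locality */
    #endif
        }
    }

This is intended for the ms->prefetchCDictTables case gated above, where the same dictionary tables are probed repeatedly across many inputs.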
@@ -361,30 +670,29 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
  {
      default: /* includes case 3 */
      case 4 :
-         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+         return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
      case 5 :
-         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+         return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
      case 6 :
-         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+         return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
      case 7 :
-         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+         return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
      }
  }
 
 
  static size_t ZSTD_compressBlock_fast_extDict_generic(
      ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-     void const* src, size_t srcSize, U32 const mls)
+     void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
  {
      const ZSTD_compressionParameters* const cParams = &ms->cParams;
      U32* const hashTable = ms->hashTable;
      U32 const hlog = cParams->hashLog;
      /* support stepSize of 0 */
-     U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+     size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
      const BYTE* const base = ms->window.base;
      const BYTE* const dictBase = ms->window.dictBase;
      const BYTE* const istart = (const BYTE*)src;
-     const BYTE* ip = istart;
      const BYTE* anchor = istart;
      const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
      const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -397,100 +705,256 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - 8;
      U32 offset_1=rep[0], offset_2=rep[1];
+     U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+     const BYTE* ip0 = istart;
+     const BYTE* ip1;
+     const BYTE* ip2;
+     const BYTE* ip3;
+     U32 current0;
+
+
+     size_t hash0; /* hash for ip0 */
+     size_t hash1; /* hash for ip1 */
+     U32 idx; /* match idx for ip0 */
+     const BYTE* idxBase; /* base pointer for idx */
+
+     U32 offcode;
+     const BYTE* match0;
+     size_t mLength;
+     const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
+
+     size_t step;
+     const BYTE* nextStep;
+     const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
+     (void)hasStep; /* not currently specialized on whether it's accelerated */
 
      DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
 
      /* switch to "regular" variant if extDict is invalidated due to maxDistance */
      if (prefixStartIndex == dictStartIndex)
-         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
-
-     /* Search Loop */
-     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
-         const size_t h = ZSTD_hashPtr(ip, hlog, mls);
-         const U32 matchIndex = hashTable[h];
-         const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
-         const BYTE* match = matchBase + matchIndex;
-         const U32 current = (U32)(ip-base);
-         const U32 repIndex = current + 1 - offset_1;
-         const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
-         const BYTE* const repMatch = repBase + repIndex;
-         hashTable[h] = current;   /* update hash table */
-         DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current);
-         assert(offset_1 <= current +1);   /* check repIndex */
-
-         if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
-           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-             size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
-             ip++;
-             ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
-             ip += rLength;
-             anchor = ip;
-         } else {
-             if ( (matchIndex < dictStartIndex) ||
-                  (MEM_read32(match) != MEM_read32(ip)) ) {
-                 assert(stepSize >= 1);
-                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                 continue;
+         return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
+
+     { U32 const curr = (U32)(ip0 - base);
+       U32 const maxRep = curr - dictStartIndex;
+       if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+       if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
+     }
+
+     /* start each op */
+ _start: /* Requires: ip0 */
+
+     step = stepSize;
+     nextStep = ip0 + kStepIncr;
+
+     /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+     ip1 = ip0 + 1;
+     ip2 = ip0 + step;
+     ip3 = ip2 + 1;
+
+     if (ip3 >= ilimit) {
+         goto _cleanup;
+     }
+
+     hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+     hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+     idx = hashTable[hash0];
+     idxBase = idx < prefixStartIndex ? dictBase : base;
+
+     do {
+         { /* load repcode match for ip[2] */
+             U32 const current2 = (U32)(ip2 - base);
+             U32 const repIndex = current2 - offset_1;
+             const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+             U32 rval;
+             if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
+                & (offset_1 > 0) ) {
+                 rval = MEM_read32(repBase + repIndex);
+             } else {
+                 rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
              }
-             { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
-               const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
-               U32 const offset = current - matchIndex;
-               size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
-               while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
-               offset_2 = offset_1; offset_1 = offset;   /* update offset history */
-               ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-               ip += mLength;
-               anchor = ip;
+
+             /* write back hash table entry */
+             current0 = (U32)(ip0 - base);
+             hashTable[hash0] = current0;
+
+             /* check repcode at ip[2] */
+             if (MEM_read32(ip2) == rval) {
+                 ip0 = ip2;
+                 match0 = repBase + repIndex;
+                 matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+                 assert((match0 != prefixStart) & (match0 != dictStart));
+                 mLength = ip0[-1] == match0[-1];
+                 ip0 -= mLength;
+                 match0 -= mLength;
+                 offcode = REPCODE1_TO_OFFBASE;
+                 mLength += 4;
+                 goto _match;
          } }
 
-         if (ip <= ilimit) {
-             /* Fill Table */
-             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
-             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
-             /* check immediate repcode */
-             while (ip <= ilimit) {
-                 U32 const current2 = (U32)(ip-base);
-                 U32 const repIndex2 = current2 - offset_2;
-                 const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
-                 if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                     { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
-                     ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
-                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                     ip += repLength2;
-                     anchor = ip;
-                     continue;
-                 }
-                 break;
-         } } }
+         { /* load match for ip[0] */
+             U32 const mval = idx >= dictStartIndex ?
+                     MEM_read32(idxBase + idx) :
+                     MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+             /* check match at ip[0] */
+             if (MEM_read32(ip0) == mval) {
+                 /* found a match! */
+                 goto _offset;
+         } }
+
+         /* lookup ip[1] */
+         idx = hashTable[hash1];
+         idxBase = idx < prefixStartIndex ? dictBase : base;
+
+         /* hash ip[2] */
+         hash0 = hash1;
+         hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+         /* advance to next positions */
+         ip0 = ip1;
+         ip1 = ip2;
+         ip2 = ip3;
+
+         /* write back hash table entry */
+         current0 = (U32)(ip0 - base);
+         hashTable[hash0] = current0;
+
+         { /* load match for ip[0] */
+             U32 const mval = idx >= dictStartIndex ?
+                     MEM_read32(idxBase + idx) :
+                     MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+             /* check match at ip[0] */
+             if (MEM_read32(ip0) == mval) {
+                 /* found a match! */
+                 goto _offset;
+         } }
+
+         /* lookup ip[1] */
+         idx = hashTable[hash1];
+         idxBase = idx < prefixStartIndex ? dictBase : base;
+
+         /* hash ip[2] */
+         hash0 = hash1;
+         hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+         /* advance to next positions */
+         ip0 = ip1;
+         ip1 = ip2;
+         ip2 = ip0 + step;
+         ip3 = ip1 + step;
+
+         /* calculate step */
+         if (ip2 >= nextStep) {
+             step++;
+             PREFETCH_L1(ip1 + 64);
+             PREFETCH_L1(ip1 + 128);
+             nextStep += kStepIncr;
+         }
+     } while (ip3 < ilimit);
+
+ _cleanup:
+     /* Note that there are probably still a couple positions we could search.
+      * However, it seems to be a meaningful performance hit to try to search
+      * them. So let's not. */
+
+     /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+      * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+     offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
 
      /* save reps for next block */
-     rep[0] = offset_1;
-     rep[1] = offset_2;
+     rep[0] = offset_1 ? offset_1 : offsetSaved1;
+     rep[1] = offset_2 ? offset_2 : offsetSaved2;
 
      /* Return the last literals size */
      return (size_t)(iend - anchor);
+
+ _offset: /* Requires: ip0, idx, idxBase */
+
+     /* Compute the offset code. */
+     { U32 const offset = current0 - idx;
+       const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
+       matchEnd = idx < prefixStartIndex ? dictEnd : iend;
+       match0 = idxBase + idx;
+       offset_2 = offset_1;
+       offset_1 = offset;
+       offcode = OFFSET_TO_OFFBASE(offset);
+       mLength = 4;
+
+       /* Count the backwards match length. */
+       while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
+           ip0--;
+           match0--;
+           mLength++;
+     } }
+
+ _match: /* Requires: ip0, match0, offcode, matchEnd */
+
+     /* Count the forward length. */
+     assert(matchEnd != 0);
+     mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
+
+     ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+     ip0 += mLength;
+     anchor = ip0;
+
+     /* write next hash table entry */
+     if (ip1 < ip0) {
+         hashTable[hash1] = (U32)(ip1 - base);
+     }
+
+     /* Fill table and check for immediate repcode. */
+     if (ip0 <= ilimit) {
+         /* Fill Table */
+         assert(base+current0+2 > istart);  /* check base overflow */
+         hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+         hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+         while (ip0 <= ilimit) {
+             U32 const repIndex2 = (U32)(ip0-base) - offset_2;
+             const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+             if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */
+               && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
+                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                 size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                 { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
+                 ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+                 ip0 += repLength2;
+                 anchor = ip0;
+                 continue;
+             }
+             break;
+     } }
+
+     goto _start;
  }
 
+ ZSTD_GEN_FAST_FN(extDict, 4, 0)
+ ZSTD_GEN_FAST_FN(extDict, 5, 0)
+ ZSTD_GEN_FAST_FN(extDict, 6, 0)
+ ZSTD_GEN_FAST_FN(extDict, 7, 0)
 
  size_t ZSTD_compressBlock_fast_extDict(
      ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
      void const* src, size_t srcSize)
  {
      U32 const mls = ms->cParams.minMatch;
+     assert(ms->dictMatchState == NULL);
      switch(mls)
      {
      default: /* includes case 3 */
      case 4 :
-         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+         return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
      case 5 :
-         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+         return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
      case 6 :
-         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+         return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
      case 7 :
-         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+         return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
      }
  }