zstd-ruby 1.4.4.0 → 1.5.5.0

Files changed (115):
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +78 -5
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/common.h +15 -0
  6. data/ext/zstdruby/extconf.rb +3 -2
  7. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  8. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  9. data/ext/zstdruby/libzstd/common/bitstream.h +74 -97
  10. data/ext/zstdruby/libzstd/common/compiler.h +219 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +184 -80
  15. data/ext/zstdruby/libzstd/common/error_private.c +11 -2
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +47 -116
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +127 -127
  19. data/ext/zstdruby/libzstd/common/huf.h +112 -197
  20. data/ext/zstdruby/libzstd/common/mem.h +124 -142
  21. data/ext/zstdruby/libzstd/common/pool.c +54 -27
  22. data/ext/zstdruby/libzstd/common/pool.h +11 -5
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +78 -22
  25. data/ext/zstdruby/libzstd/common/threading.h +9 -13
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +186 -144
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +99 -196
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +968 -331
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +4120 -1191
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +688 -159
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +121 -40
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +62 -35
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +577 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +322 -115
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +394 -154
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +4 -3
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -253
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +4 -3
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1289 -247
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +339 -212
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +508 -282
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +217 -466
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +35 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1220 -572
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +23 -19
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +859 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1244 -375
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +21 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +74 -11
  67. data/ext/zstdruby/libzstd/dictBuilder/cover.c +75 -54
  68. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  69. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +55 -36
  71. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +126 -110
  72. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +248 -56
  73. data/ext/zstdruby/libzstd/zstd.h +1277 -306
  74. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +29 -8
  75. data/ext/zstdruby/main.c +20 -0
  76. data/ext/zstdruby/skippable_frame.c +63 -0
  77. data/ext/zstdruby/streaming_compress.c +177 -0
  78. data/ext/zstdruby/streaming_compress.h +5 -0
  79. data/ext/zstdruby/streaming_decompress.c +123 -0
  80. data/ext/zstdruby/zstdruby.c +114 -32
  81. data/lib/zstd-ruby/version.rb +1 -1
  82. data/lib/zstd-ruby.rb +0 -1
  83. data/zstd-ruby.gemspec +1 -1
  84. metadata +24 -39
  85. data/.travis.yml +0 -14
  86. data/ext/zstdruby/libzstd/.gitignore +0 -3
  87. data/ext/zstdruby/libzstd/BUCK +0 -234
  88. data/ext/zstdruby/libzstd/Makefile +0 -289
  89. data/ext/zstdruby/libzstd/README.md +0 -159
  90. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  91. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  92. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
  93. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  94. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -47
  95. data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
  96. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  97. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  98. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  99. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  100. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2152
  101. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  102. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3514
  103. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  104. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3156
  105. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  106. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3641
  107. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  108. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4046
  109. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  110. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4150
  111. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  112. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4533
  113. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  114. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
  115. data/ext/zstdruby/zstdruby.h +0 -6
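
Beyond the bundled libzstd upgrade, the gem itself grows streaming and skippable-frame bindings (streaming_compress.c, streaming_decompress.c, skippable_frame.c). Those wrappers sit on top of libzstd's streaming entry point, ZSTD_compressStream2. A minimal sketch of the underlying C call sequence (buffer names are illustrative and error handling is elided; this is not the gem's actual wrapper code):

    #include <zstd.h>   /* bundled under data/ext/zstdruby/libzstd */

    /* Compress one buffer through the streaming API in a single ZSTD_e_end drive.
     * Assumes dstCap >= ZSTD_compressBound(srcSize), so the output never fills up. */
    size_t stream_compress_once(void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_inBuffer  input  = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCap,  0 };
        size_t remaining;
        do {
            /* returns 0 once the frame is fully flushed, or an error code */
            remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        } while (remaining != 0 && !ZSTD_isError(remaining));
        ZSTD_freeCCtx(cctx);
        return ZSTD_isError(remaining) ? remaining : output.pos;
    }

The representative diff below is data/ext/zstdruby/libzstd/compress/zstd_fast.c, which was substantially rewritten between libzstd 1.4.4 and 1.5.5.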
data/ext/zstdruby/libzstd/compress/zstd_fast.c CHANGED
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,42 @@
 #include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
 #include "zstd_fast.h"
 
+static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
+                        const void* const end,
+                        ZSTD_dictTableLoadMethod_e dtlm)
+{
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const hashTable = ms->hashTable;
+    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+    U32  const mls = cParams->minMatch;
+    const BYTE* const base = ms->window.base;
+    const BYTE* ip = base + ms->nextToUpdate;
+    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+    const U32 fastHashFillStep = 3;
 
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
+     * Feel free to remove this assert if there's a good reason! */
+    assert(dtlm == ZSTD_dtlm_full);
+
+    /* Always insert every fastHashFillStep position into the hash table.
+     * Insert the other positions if their hash entry is empty.
+     */
+    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+        U32 const curr = (U32)(ip - base);
+        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
+            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }
+
+        if (dtlm == ZSTD_dtlm_fast) continue;
+        /* Only load extra positions for ZSTD_dtlm_full */
+        {   U32 p;
+            for (p = 1; p < fastHashFillStep; ++p) {
+                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
+                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
+                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
+    }   }   }   }
+}
+
+static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                         const void* const end,
                         ZSTD_dictTableLoadMethod_e dtlm)
 {
@@ -25,152 +59,365 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
     const U32 fastHashFillStep = 3;
 
+    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
+     * Feel free to remove this assert if there's a good reason! */
+    assert(dtlm == ZSTD_dtlm_fast);
+
     /* Always insert every fastHashFillStep position into the hash table.
      * Insert the other positions if their hash entry is empty.
      */
     for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
-        U32 const current = (U32)(ip - base);
+        U32 const curr = (U32)(ip - base);
         size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
-        hashTable[hash0] = current;
+        hashTable[hash0] = curr;
         if (dtlm == ZSTD_dtlm_fast) continue;
         /* Only load extra positions for ZSTD_dtlm_full */
         {   U32 p;
             for (p = 1; p < fastHashFillStep; ++p) {
                 size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                 if (hashTable[hash] == 0) {  /* not yet filled */
-                    hashTable[hash] = current + p;
+                    hashTable[hash] = curr + p;
     }   }   }   }
 }
 
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                        const void* const end,
+                        ZSTD_dictTableLoadMethod_e dtlm,
+                        ZSTD_tableFillPurpose_e tfp)
+{
+    if (tfp == ZSTD_tfp_forCDict) {
+        ZSTD_fillHashTableForCDict(ms, end, dtlm);
+    } else {
+        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
+    }
+}
+
 
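
ZSTD_writeTaggedIndex and ZSTD_SHORT_CACHE_TAG_BITS are new in this drop of the library: each CDict hash-table entry packs the position index together with a short tag taken from the low bits of a wider hash, so a later dictionary probe can reject most collisions with a single table read and no access to dictionary content. A paraphrased sketch of the scheme (the authoritative definitions live in the bundled zstd_compress_internal.h; the 8-bit tag width matches upstream zstd 1.5.x, but verify against the header):

    #include <stdint.h>
    typedef uint32_t U32;   /* as in zstd's mem.h */

    #define ZSTD_SHORT_CACHE_TAG_BITS 8
    #define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)

    /* hashAndTag comes from hashing at hBits = hashLog + ZSTD_SHORT_CACHE_TAG_BITS,
     * i.e. (hash << 8) | tag. The table entry stores (index << 8) | tag, so the
     * index must fit in 24 bits (upstream asserts this). */
    static void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index)
    {
        size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
        U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
        hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
    }

    /* Nonzero iff the low 8 bits (the tags) of two packed values agree. */
    static int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2)
    {
        return ((packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK)
             == (packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK));
    }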
+/**
+ * If you squint hard enough (and ignore repcodes), the search operation at any
+ * given position is broken into 4 stages:
+ *
+ * 1. Hash   (map position to hash value via input read)
+ * 2. Lookup (map hash val to index via hashtable read)
+ * 3. Load   (map index to value at that position via input read)
+ * 4. Compare
+ *
+ * Each of these steps involves a memory read at an address which is computed
+ * from the previous step. This means these steps must be sequenced and their
+ * latencies are cumulative.
+ *
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
+ * onto the next, this implementation interleaves these operations across the
+ * next few positions:
+ *
+ * R = Repcode Read & Compare
+ * H = Hash
+ * T = Table Lookup
+ * M = Match Read & Compare
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N   | ... M
+ * N+1 | ...   TM
+ * N+2 |    R H   T M
+ * N+3 |         H     TM
+ * N+4 |             R H   T M
+ * N+5 |                  H   ...
+ * N+6 |                      R ...
+ *
+ * This is very much analogous to the pipelining of execution in a CPU. And just
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+ * branch).
+ *
+ * When this happens, we throw away our current state, and do the following prep
+ * to re-enter the loop:
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N   | H T
+ * N+1 |   H
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
 FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_fast_generic(
+ZSTD_compressBlock_fast_noDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const mls)
+        U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32 const hlog = cParams->hashLog;
     /* support stepSize of 0 */
-    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
-    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
-    const BYTE* ip0 = istart;
-    const BYTE* ip1;
-    const BYTE* anchor = istart;
     const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
-    const U32 maxDistance = 1U << cParams->windowLog;
-    const U32 validStartIndex = ms->window.dictLimit;
-    const U32 prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;
+    const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
     const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
-    U32 offset_1=rep[0], offset_2=rep[1];
-    U32 offsetSaved = 0;
 
-    /* init */
+    const BYTE* anchor = istart;
+    const BYTE* ip0 = istart;
+    const BYTE* ip1;
+    const BYTE* ip2;
+    const BYTE* ip3;
+    U32 current0;
+
+    U32 rep_offset1 = rep[0];
+    U32 rep_offset2 = rep[1];
+    U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+    size_t hash0; /* hash for ip0 */
+    size_t hash1; /* hash for ip1 */
+    U32 idx; /* match idx for ip0 */
+    U32 mval; /* src value at match idx */
+
+    U32 offcode;
+    const BYTE* match0;
+    size_t mLength;
+
+    /* ip0 and ip1 are always adjacent. The targetLength skipping and
+     * uncompressibility acceleration is applied to every other position,
+     * matching the behavior of #1562. step therefore represents the gap
+     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+    size_t step;
+    const BYTE* nextStep;
+    const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
     DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
     ip0 += (ip0 == prefixStart);
+    {   U32 const curr = (U32)(ip0 - base);
+        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+        U32 const maxRep = curr - windowLow;
+        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
+        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
+    }
+
+    /* start each op */
+_start: /* Requires: ip0 */
+
+    step = stepSize;
+    nextStep = ip0 + kStepIncr;
+
+    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
     ip1 = ip0 + 1;
-    {   U32 const maxRep = (U32)(ip0 - prefixStart);
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    ip2 = ip0 + step;
+    ip3 = ip2 + 1;
+
+    if (ip3 >= ilimit) {
+        goto _cleanup;
     }
 
-    /* Main Search Loop */
-    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
-        size_t mLength;
-        BYTE const* ip2 = ip0 + 2;
-        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
-        U32 const val0 = MEM_read32(ip0);
-        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
-        U32 const val1 = MEM_read32(ip1);
-        U32 const current0 = (U32)(ip0-base);
-        U32 const current1 = (U32)(ip1-base);
-        U32 const matchIndex0 = hashTable[h0];
-        U32 const matchIndex1 = hashTable[h1];
-        BYTE const* repMatch = ip2-offset_1;
-        const BYTE* match0 = base + matchIndex0;
-        const BYTE* match1 = base + matchIndex1;
-        U32 offcode;
-        hashTable[h0] = current0;   /* update hash table */
-        hashTable[h1] = current1;   /* update hash table */
-
-        assert(ip0 + 1 == ip1);
-
-        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
-            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
-            ip0 = ip2 - mLength;
-            match0 = repMatch - mLength;
-            offcode = 0;
+    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+    hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+    idx = hashTable[hash0];
+
+    do {
+        /* load repcode match for ip[2] */
+        const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+        /* write back hash table entry */
+        current0 = (U32)(ip0 - base);
+        hashTable[hash0] = current0;
+
+        /* check repcode at ip[2] */
+        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+            ip0 = ip2;
+            match0 = ip0 - rep_offset1;
+            mLength = ip0[-1] == match0[-1];
+            ip0 -= mLength;
+            match0 -= mLength;
+            offcode = REPCODE1_TO_OFFBASE;
+            mLength += 4;
+
+            /* First write next hash table entry; we've already calculated it.
+             * This write is known to be safe because the ip1 is before the
+             * repcode (ip2). */
+            hashTable[hash1] = (U32)(ip1 - base);
+
             goto _match;
         }
-        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
-            /* found a regular match */
+
+        /* load match for ip[0] */
+        if (idx >= prefixStartIndex) {
+            mval = MEM_read32(base + idx);
+        } else {
+            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+        }
+
+        /* check match at ip[0] */
+        if (MEM_read32(ip0) == mval) {
+            /* found a match! */
+
+            /* First write next hash table entry; we've already calculated it.
+             * This write is known to be safe because the ip1 == ip0 + 1, so
+             * we know we will resume searching after ip1 */
+            hashTable[hash1] = (U32)(ip1 - base);
+
             goto _offset;
         }
-        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
-            /* found a regular match after one literal */
-            ip0 = ip1;
-            match0 = match1;
+
+        /* lookup ip[1] */
+        idx = hashTable[hash1];
+
+        /* hash ip[2] */
+        hash0 = hash1;
+        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+        /* advance to next positions */
+        ip0 = ip1;
+        ip1 = ip2;
+        ip2 = ip3;
+
+        /* write back hash table entry */
+        current0 = (U32)(ip0 - base);
+        hashTable[hash0] = current0;
+
+        /* load match for ip[0] */
+        if (idx >= prefixStartIndex) {
+            mval = MEM_read32(base + idx);
+        } else {
+            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+        }
+
+        /* check match at ip[0] */
+        if (MEM_read32(ip0) == mval) {
+            /* found a match! */
+
+            /* first write next hash table entry; we've already calculated it */
+            if (step <= 4) {
+                /* We need to avoid writing an index into the hash table >= the
+                 * position at which we will pick up our searching after we've
+                 * taken this match.
+                 *
+                 * The minimum possible match has length 4, so the earliest ip0
+                 * can be after we take this match will be the current ip0 + 4.
+                 * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
+                 * write this position.
+                 */
+                hashTable[hash1] = (U32)(ip1 - base);
+            }
+
             goto _offset;
         }
-        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
-            assert(step >= 2);
-            ip0 += step;
-            ip1 += step;
-            continue;
+
+        /* lookup ip[1] */
+        idx = hashTable[hash1];
+
+        /* hash ip[2] */
+        hash0 = hash1;
+        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+        /* advance to next positions */
+        ip0 = ip1;
+        ip1 = ip2;
+        ip2 = ip0 + step;
+        ip3 = ip1 + step;
+
+        /* calculate step */
+        if (ip2 >= nextStep) {
+            step++;
+            PREFETCH_L1(ip1 + 64);
+            PREFETCH_L1(ip1 + 128);
+            nextStep += kStepIncr;
         }
-_offset: /* Requires: ip0, match0 */
-        /* Compute the offset code */
-        offset_2 = offset_1;
-        offset_1 = (U32)(ip0-match0);
-        offcode = offset_1 + ZSTD_REP_MOVE;
-        mLength = 0;
-        /* Count the backwards match length */
-        while (((ip0>anchor) & (match0>prefixStart))
-             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+    } while (ip3 < ilimit);
+
+_cleanup:
+    /* Note that there are probably still a couple positions we could search.
+     * However, it seems to be a meaningful performance hit to try to search
+     * them. So let's not. */
+
+    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
+     * When the offsets are still zero, we need to restore them after the block to have a correct
+     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
+     * offsets were invalid. We need to figure out which offset to refill with.
+     *     - If both offsets are zero they are in the same order.
+     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
+     *     - If only one is zero, we need to decide which offset to restore.
+     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
+     *         - It is impossible for rep_offset2 to be non-zero.
+     *
+     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
+     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
+     */
+    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+    /* save reps for next block */
+    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
+    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
+
+    /* Return the last literals size */
+    return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx */
+
+    /* Compute the offset code. */
+    match0 = base + idx;
+    rep_offset2 = rep_offset1;
+    rep_offset1 = (U32)(ip0-match0);
+    offcode = OFFSET_TO_OFFBASE(rep_offset1);
+    mLength = 4;
+
+    /* Count the backwards match length. */
+    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+        ip0--;
+        match0--;
+        mLength++;
+    }
 
 _match: /* Requires: ip0, match0, offcode */
-        /* Count the forward length */
-        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
-        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
-        /* match found */
-        ip0 += mLength;
-        anchor = ip0;
-        ip1 = ip0 + 1;
 
-        if (ip0 <= ilimit) {
-            /* Fill Table */
-            assert(base+current0+2 > istart);  /* check base overflow */
-            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
-            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+    /* Count the forward length. */
+    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
+
+    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
 
-            while ( ((ip0 <= ilimit) & (offset_2>0))  /* offset_2==0 means offset_2 is invalidated */
-                 && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
+    ip0 += mLength;
+    anchor = ip0;
+
+    /* Fill table and check for immediate repcode. */
+    if (ip0 <= ilimit) {
+        /* Fill Table */
+        assert(base+current0+2 > istart);  /* check base overflow */
+        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                 /* store sequence */
-                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                 ip0 += rLength;
-                ip1 = ip0 + 1;
-                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                 anchor = ip0;
                 continue;   /* faster when present (confirmed on gcc-8) ... (?) */
-            }
-        }
-    }
-
-    /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
+    }   }   }
 
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
+    goto _start;
 }
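
The scheduling described in the block comment above is easier to see stripped of zstd's specifics. The freestanding sketch below interleaves stage 1 (hash) for position N+1 with stages 2-4 for position N, so the dependent memory reads of consecutive positions overlap. The names and the toy hash are invented for illustration; none of this is zstd code:

    #include <stddef.h>
    #include <stdint.h>

    #define TOY_TABLE_LOG 10
    #define TOY_TABLE_SIZE ((size_t)1 << TOY_TABLE_LOG)

    /* Toy stand-in for ZSTD_hashPtr: any cheap 4-byte hash works for the demo. */
    static uint32_t toy_hash(const uint8_t* p)
    {
        uint32_t const v = (uint32_t)p[0] | ((uint32_t)p[1] << 8)
                         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
        return (v * 2654435761u) >> (32 - TOY_TABLE_LOG);
    }

    /* Count 4-byte matches found via the table, software-pipelined: the hash
     * for ip+1 (stage 1) is issued before the table entry for ip (stages 2-4)
     * is consumed. Caller supplies a zero-initialized table. */
    static size_t count_matches(const uint8_t* src, size_t srcSize,
                                const uint8_t* table[TOY_TABLE_SIZE])
    {
        size_t hits = 0;
        if (srcSize >= 8) {
            const uint8_t* ip = src;
            const uint8_t* const iend = src + srcSize - 7;
            uint32_t h0 = toy_hash(ip);                 /* prep to enter the loop */
            while (ip < iend) {
                uint32_t const h1 = toy_hash(ip + 1);   /* stage 1 for ip+1 */
                const uint8_t* const cand = table[h0];  /* stage 2 for ip   */
                if (cand != NULL
                    && cand[0] == ip[0] && cand[1] == ip[1]
                    && cand[2] == ip[2] && cand[3] == ip[3])
                    hits++;                             /* stages 3-4 for ip */
                table[h0] = ip;
                h0 = h1;
                ip++;
            }
        }
        return hits;
    }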
 
+#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
+    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                   \
+            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
+            void const* src, size_t srcSize)                                                             \
+    {                                                                                                    \
+        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+    }
+
+ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ZSTD_GEN_FAST_FN(noDict, 7, 1)
+
+ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ZSTD_GEN_FAST_FN(noDict, 7, 0)
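
ZSTD_GEN_FAST_FN stamps out one thin wrapper per (dictMode, mls, step) triple, so that mls and hasStep reach the FORCE_INLINE_TEMPLATE body as compile-time constants and the branches on them fold away. For instance, ZSTD_GEN_FAST_FN(noDict, 4, 1) is equivalent to writing:

    static size_t ZSTD_compressBlock_fast_noDict_4_1(
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            void const* src, size_t srcSize)
    {
        return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1);
    }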
 
 size_t ZSTD_compressBlock_fast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -178,24 +425,40 @@ size_t ZSTD_compressBlock_fast(
 {
     U32 const mls = ms->cParams.minMatch;
     assert(ms->dictMatchState == NULL);
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
-    case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
-    case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
-    case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+    if (ms->cParams.targetLength > 1) {
+        switch(mls)
+        {
+        default: /* includes case 3 */
+        case 4 :
+            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
+        case 5 :
+            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
+        case 6 :
+            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
+        case 7 :
+            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+        }
+    } else {
+        switch(mls)
+        {
+        default: /* includes case 3 */
+        case 4 :
+            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+        case 5 :
+            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+        case 6 :
+            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+        case 7 :
+            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+        }
+
     }
 }
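
The targetLength > 1 test above is what routes an explicit acceleration request to the hasStep == 1 specializations; everything else takes the fixed-gap variants. Restating the stepSize expression from the generic function as a standalone helper (illustrative only, not part of the library):

    /* hasStep == 0: the pairs (ip0, ip1) and (ip2, ip3) are always 2 apart.
     * hasStep == 1: the gap is targetLength + 1, with targetLength == 0
     * mapping back to 2 via the !(targetLength) term.
     * Example: targetLength == 4  ->  4 + !(4) + 1 == 5. */
    static size_t fast_step_size(unsigned targetLength, unsigned hasStep)
    {
        return hasStep ? ((size_t)targetLength + !targetLength + 1) : 2;
    }

Note that the dictMatchState and extDict paths below are only instantiated with step == 0 for now, consistent with their "(void)hasStep" comments.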
 
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_fast_dictMatchState_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize, U32 const mls)
+        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -204,14 +467,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
     U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
+    const BYTE* ip0 = istart;
+    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
     const BYTE* anchor = istart;
     const U32 prefixStartIndex = ms->window.dictLimit;
     const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
-    U32 offsetSaved = 0;
 
     const ZSTD_matchState_t* const dms = ms->dictMatchState;
     const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
@@ -221,125 +484,182 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
     const BYTE* const dictStart = dictBase + dictStartIndex;
     const BYTE* const dictEnd = dms->window.nextSrc;
     const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
-    const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
-    const U32 dictHLog = dictCParams->hashLog;
+    const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
+    const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
 
     /* if a dictionary is still attached, it necessarily means that
      * it is within window size. So we just check it. */
     const U32 maxDistance = 1U << cParams->windowLog;
-    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
     assert(endIndex - prefixStartIndex <= maxDistance);
     (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
 
-    /* ensure there will be no no underflow
+    (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+    /* ensure there will be no underflow
      * when translating a dict index into a local index */
     assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
 
+    if (ms->prefetchCDictTables) {
+        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+        PREFETCH_AREA(dictHashTable, hashTableBytes)
+    }
+
     /* init */
     DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
-    ip += (dictAndPrefixLength == 0);
+    ip0 += (dictAndPrefixLength == 0);
     /* dictMatchState repCode checks don't currently handle repCode == 0
      * disabling. */
     assert(offset_1 <= dictAndPrefixLength);
     assert(offset_2 <= dictAndPrefixLength);
 
-    /* Main Search Loop */
-    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+    /* Outer search loop */
+    assert(stepSize >= 1);
+    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
         size_t mLength;
-        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
-        U32 const current = (U32)(ip-base);
-        U32 const matchIndex = hashTable[h];
-        const BYTE* match = base + matchIndex;
-        const U32 repIndex = current + 1 - offset_1;
-        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
-                               dictBase + (repIndex - dictIndexDelta) :
-                               base + repIndex;
-        hashTable[h] = current;   /* update hash table */
-
-        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
-          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
-        } else if ( (matchIndex <= prefixStartIndex) ) {
-            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
-            U32 const dictMatchIndex = dictHashTable[dictHash];
-            const BYTE* dictMatch = dictBase + dictMatchIndex;
-            if (dictMatchIndex <= dictStartIndex ||
-                MEM_read32(dictMatch) != MEM_read32(ip)) {
-                assert(stepSize >= 1);
-                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                continue;
-            } else {
-                /* found a dict match */
-                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
-                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
-                while (((ip>anchor) & (dictMatch>dictStart))
-                     && (ip[-1] == dictMatch[-1])) {
-                    ip--; dictMatch--; mLength++;
+        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+
+        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
+        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
+        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
+
+        U32 matchIndex = hashTable[hash0];
+        U32 curr = (U32)(ip0 - base);
+        size_t step = stepSize;
+        const size_t kStepIncr = 1 << kSearchStrength;
+        const BYTE* nextStep = ip0 + kStepIncr;
+
+        /* Inner search loop */
+        while (1) {
+            const BYTE* match = base + matchIndex;
+            const U32 repIndex = curr + 1 - offset_1;
+            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+                                   dictBase + (repIndex - dictIndexDelta) :
+                                   base + repIndex;
+            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
+            hashTable[hash0] = curr;   /* update hash table */
+
+            if (((U32) ((prefixStartIndex - 1) - repIndex) >=
+                 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
+                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
+                ip0++;
+                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
+                break;
+            }
+
+            if (dictTagsMatch) {
+                /* Found a possible dict match */
+                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+                const BYTE* dictMatch = dictBase + dictMatchIndex;
+                if (dictMatchIndex > dictStartIndex &&
+                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
+                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
+                    if (matchIndex <= prefixStartIndex) {
+                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
+                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
+                        while (((ip0 > anchor) & (dictMatch > dictStart))
+                             && (ip0[-1] == dictMatch[-1])) {
+                            ip0--;
+                            dictMatch--;
+                            mLength++;
+                        } /* catch up */
+                        offset_2 = offset_1;
+                        offset_1 = offset;
+                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+                        break;
+                    }
+                }
+            }
+
+            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
+                /* found a regular match */
+                U32 const offset = (U32) (ip0 - match);
+                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
+                while (((ip0 > anchor) & (match > prefixStart))
+                     && (ip0[-1] == match[-1])) {
+                    ip0--;
+                    match--;
+                    mLength++;
                 } /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+                break;
             }
-        } else if (MEM_read32(match) != MEM_read32(ip)) {
-            /* it's not a match, and we're not going to check the dictionary */
-            assert(stepSize >= 1);
-            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-            continue;
-        } else {
-            /* found a regular match */
-            U32 const offset = (U32)(ip-match);
-            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            while (((ip>anchor) & (match>prefixStart))
-                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-            offset_2 = offset_1;
-            offset_1 = offset;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-        }
+
+            /* Prepare for next iteration */
+            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
+            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
+            matchIndex = hashTable[hash1];
+
+            if (ip1 >= nextStep) {
+                step++;
+                nextStep += kStepIncr;
+            }
+            ip0 = ip1;
+            ip1 = ip1 + step;
+            if (ip1 > ilimit) goto _cleanup;
+
+            curr = (U32)(ip0 - base);
+            hash0 = hash1;
+        }   /* end inner search loop */
 
         /* match found */
-        ip += mLength;
-        anchor = ip;
+        assert(mLength);
+        ip0 += mLength;
+        anchor = ip0;
 
-        if (ip <= ilimit) {
+        if (ip0 <= ilimit) {
             /* Fill Table */
-            assert(base+current+2 > istart);  /* check base overflow */
-            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+            assert(base+curr+2 > istart);  /* check base overflow */
+            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
 
             /* check immediate repcode */
-            while (ip <= ilimit) {
-                U32 const current2 = (U32)(ip-base);
+            while (ip0 <= ilimit) {
+                U32 const current2 = (U32)(ip0-base);
                 U32 const repIndex2 = current2 - offset_2;
                 const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                         dictBase - dictIndexDelta + repIndex2 :
                         base + repIndex2;
                 if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
-                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                    ip += repLength2;
-                    anchor = ip;
+                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
+                    ip0 += repLength2;
+                    anchor = ip0;
                     continue;
                 }
                 break;
             }
         }
+
+        /* Prepare for next iteration */
+        assert(ip0 == anchor);
+        ip1 = ip0 + stepSize;
     }
 
+_cleanup:
     /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
+    rep[0] = offset_1;
+    rep[1] = offset_2;
 
     /* Return the last literals size */
     return (size_t)(iend - anchor);
 }
 
+
+ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
 size_t ZSTD_compressBlock_fast_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
@@ -350,30 +670,29 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
 {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
 }
682
 
363
683
 
364
684
  static size_t ZSTD_compressBlock_fast_extDict_generic(
365
685
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
366
- void const* src, size_t srcSize, U32 const mls)
686
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
367
687
  {
368
688
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
369
689
  U32* const hashTable = ms->hashTable;
370
690
  U32 const hlog = cParams->hashLog;
371
691
  /* support stepSize of 0 */
372
- U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
692
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
373
693
  const BYTE* const base = ms->window.base;
374
694
  const BYTE* const dictBase = ms->window.dictBase;
375
695
  const BYTE* const istart = (const BYTE*)src;
376
- const BYTE* ip = istart;
377
696
  const BYTE* anchor = istart;
378
697
  const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
379
698
  const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -386,99 +705,256 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
386
705
  const BYTE* const iend = istart + srcSize;
387
706
  const BYTE* const ilimit = iend - 8;
388
707
  U32 offset_1=rep[0], offset_2=rep[1];
708
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
709
+
710
+ const BYTE* ip0 = istart;
711
+ const BYTE* ip1;
712
+ const BYTE* ip2;
713
+ const BYTE* ip3;
714
+ U32 current0;
715
+
716
+
717
+ size_t hash0; /* hash for ip0 */
718
+ size_t hash1; /* hash for ip1 */
719
+ U32 idx; /* match idx for ip0 */
720
+ const BYTE* idxBase; /* base pointer for idx */
721
+
722
+ U32 offcode;
723
+ const BYTE* match0;
724
+ size_t mLength;
725
+ const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
726
+
727
+ size_t step;
728
+ const BYTE* nextStep;
729
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
730
+
731
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
389
732
 
390
- DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
733
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
391
734
 
392
735
  /* switch to "regular" variant if extDict is invalidated due to maxDistance */
393
736
  if (prefixStartIndex == dictStartIndex)
394
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
395
-
396
- /* Search Loop */
397
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
398
- const size_t h = ZSTD_hashPtr(ip, hlog, mls);
399
- const U32 matchIndex = hashTable[h];
400
- const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
401
- const BYTE* match = matchBase + matchIndex;
402
- const U32 current = (U32)(ip-base);
403
- const U32 repIndex = current + 1 - offset_1;
404
- const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
405
- const BYTE* const repMatch = repBase + repIndex;
406
- hashTable[h] = current; /* update hash table */
407
- assert(offset_1 <= current +1); /* check repIndex */
408
-
409
- if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
410
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
411
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
412
- size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
413
- ip++;
414
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
415
- ip += rLength;
416
- anchor = ip;
417
- } else {
418
- if ( (matchIndex < dictStartIndex) ||
419
- (MEM_read32(match) != MEM_read32(ip)) ) {
420
- assert(stepSize >= 1);
421
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
422
- continue;
737
+ return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
738
+
739
+ { U32 const curr = (U32)(ip0 - base);
740
+ U32 const maxRep = curr - dictStartIndex;
741
+ if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
742
+ if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
743
+ }
744
+
745
+ /* start each op */
746
+ _start: /* Requires: ip0 */
747
+
748
+ step = stepSize;
749
+ nextStep = ip0 + kStepIncr;
750
+
751
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
752
+ ip1 = ip0 + 1;
753
+ ip2 = ip0 + step;
754
+ ip3 = ip2 + 1;
755
+
756
+ if (ip3 >= ilimit) {
757
+ goto _cleanup;
758
+ }
759
+
760
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
761
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
762
+
763
+ idx = hashTable[hash0];
764
+ idxBase = idx < prefixStartIndex ? dictBase : base;
765
+
766
+ do {
767
+ { /* load repcode match for ip[2] */
768
+ U32 const current2 = (U32)(ip2 - base);
769
+ U32 const repIndex = current2 - offset_1;
770
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
771
+ U32 rval;
772
+ if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
773
+ & (offset_1 > 0) ) {
774
+ rval = MEM_read32(repBase + repIndex);
775
+ } else {
776
+ rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
423
777
  }
424
- { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
425
- const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
426
- U32 const offset = current - matchIndex;
427
- size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
428
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
429
- offset_2 = offset_1; offset_1 = offset; /* update offset history */
430
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
431
- ip += mLength;
432
- anchor = ip;
778
+
779
+ /* write back hash table entry */
780
+ current0 = (U32)(ip0 - base);
781
+ hashTable[hash0] = current0;
782
+
783
+ /* check repcode at ip[2] */
784
+ if (MEM_read32(ip2) == rval) {
785
+ ip0 = ip2;
786
+ match0 = repBase + repIndex;
787
+ matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
788
+ assert((match0 != prefixStart) & (match0 != dictStart));
789
+ mLength = ip0[-1] == match0[-1];
790
+ ip0 -= mLength;
791
+ match0 -= mLength;
792
+ offcode = REPCODE1_TO_OFFBASE;
793
+ mLength += 4;
794
+ goto _match;
433
795
  } }
434
796
 
435
- if (ip <= ilimit) {
436
- /* Fill Table */
437
- hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
438
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
439
- /* check immediate repcode */
440
- while (ip <= ilimit) {
441
- U32 const current2 = (U32)(ip-base);
442
- U32 const repIndex2 = current2 - offset_2;
443
- const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
444
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
445
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
446
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
447
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
448
- { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
449
- ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
450
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
451
- ip += repLength2;
452
- anchor = ip;
453
- continue;
454
- }
455
- break;
456
- } } }
797
+ { /* load match for ip[0] */
798
+ U32 const mval = idx >= dictStartIndex ?
799
+ MEM_read32(idxBase + idx) :
800
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
801
+
802
+ /* check match at ip[0] */
803
+ if (MEM_read32(ip0) == mval) {
804
+ /* found a match! */
805
+ goto _offset;
806
+ } }
807
+
808
+ /* lookup ip[1] */
809
+ idx = hashTable[hash1];
810
+ idxBase = idx < prefixStartIndex ? dictBase : base;
811
+
812
+ /* hash ip[2] */
813
+ hash0 = hash1;
814
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
815
+
816
+ /* advance to next positions */
817
+ ip0 = ip1;
818
+ ip1 = ip2;
819
+ ip2 = ip3;
820
+
821
+ /* write back hash table entry */
822
+ current0 = (U32)(ip0 - base);
823
+ hashTable[hash0] = current0;
824
+
825
+ { /* load match for ip[0] */
826
+ U32 const mval = idx >= dictStartIndex ?
827
+ MEM_read32(idxBase + idx) :
828
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
829
+
830
+ /* check match at ip[0] */
831
+ if (MEM_read32(ip0) == mval) {
832
+ /* found a match! */
833
+ goto _offset;
834
+ } }
835
+
836
+ /* lookup ip[1] */
837
+ idx = hashTable[hash1];
838
+ idxBase = idx < prefixStartIndex ? dictBase : base;
839
+
840
+ /* hash ip[2] */
841
+ hash0 = hash1;
842
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
843
+
844
+ /* advance to next positions */
845
+ ip0 = ip1;
846
+ ip1 = ip2;
847
+ ip2 = ip0 + step;
848
+ ip3 = ip1 + step;
849
+
850
+ /* calculate step */
851
+ if (ip2 >= nextStep) {
852
+ step++;
853
+ PREFETCH_L1(ip1 + 64);
854
+ PREFETCH_L1(ip1 + 128);
855
+ nextStep += kStepIncr;
856
+ }
857
+ } while (ip3 < ilimit);
858
+
859
+ _cleanup:
860
+ /* Note that there are probably still a couple positions we could search.
861
+ * However, it seems to be a meaningful performance hit to try to search
862
+ * them. So let's not. */
863
+
864
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
865
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
866
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
457
867
 
458
868
  /* save reps for next block */
459
- rep[0] = offset_1;
460
- rep[1] = offset_2;
869
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
870
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
461
871
 
462
872
  /* Return the last literals size */
463
873
  return (size_t)(iend - anchor);
874
+
875
+ _offset: /* Requires: ip0, idx, idxBase */
876
+
877
+ /* Compute the offset code. */
878
+ { U32 const offset = current0 - idx;
879
+ const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
880
+ matchEnd = idx < prefixStartIndex ? dictEnd : iend;
881
+ match0 = idxBase + idx;
882
+ offset_2 = offset_1;
883
+ offset_1 = offset;
884
+ offcode = OFFSET_TO_OFFBASE(offset);
885
+ mLength = 4;
886
+
887
+ /* Count the backwards match length. */
888
+ while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
889
+ ip0--;
890
+ match0--;
891
+ mLength++;
892
+ } }
893
+
894
+ _match: /* Requires: ip0, match0, offcode, matchEnd */
895
+
896
+ /* Count the forward length. */
897
+ assert(matchEnd != 0);
898
+ mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
899
+
900
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
901
+
902
+ ip0 += mLength;
903
+ anchor = ip0;
904
+
905
+ /* write next hash table entry */
906
+ if (ip1 < ip0) {
907
+ hashTable[hash1] = (U32)(ip1 - base);
908
+ }
909
+
910
+ /* Fill table and check for immediate repcode. */
911
+ if (ip0 <= ilimit) {
912
+ /* Fill Table */
913
+ assert(base+current0+2 > istart); /* check base overflow */
914
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
915
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
916
+
917
+ while (ip0 <= ilimit) {
918
+ U32 const repIndex2 = (U32)(ip0-base) - offset_2;
919
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
920
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */
921
+ && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
922
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
923
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
924
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
925
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
926
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
927
+ ip0 += repLength2;
928
+ anchor = ip0;
929
+ continue;
930
+ }
931
+ break;
932
+ } }
933
+
934
+ goto _start;
464
935
  }
465
936
 
937
+ ZSTD_GEN_FAST_FN(extDict, 4, 0)
938
+ ZSTD_GEN_FAST_FN(extDict, 5, 0)
939
+ ZSTD_GEN_FAST_FN(extDict, 6, 0)
940
+ ZSTD_GEN_FAST_FN(extDict, 7, 0)
466
941
 
467
942
  size_t ZSTD_compressBlock_fast_extDict(
468
943
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
469
944
  void const* src, size_t srcSize)
470
945
  {
471
946
  U32 const mls = ms->cParams.minMatch;
947
+ assert(ms->dictMatchState == NULL);
472
948
  switch(mls)
473
949
  {
474
950
  default: /* includes case 3 */
475
951
  case 4 :
476
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
952
+ return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
477
953
  case 5 :
478
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
954
+ return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
479
955
  case 6 :
480
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
956
+ return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
481
957
  case 7 :
482
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
958
+ return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
483
959
  }
484
960
  }