yencode 1.0.8 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +339 -231
- package/binding.gyp +292 -39
- package/crcutil-1.0/code/multiword_64_64_gcc_amd64_asm.cc +7 -7
- package/crcutil-1.0/code/multiword_64_64_gcc_i386_mmx.cc +14 -14
- package/crcutil-1.0/code/multiword_64_64_intrinsic_i386_mmx.cc +1 -1
- package/crcutil-1.0/code/uint128_sse2.h +2 -0
- package/index.js +329 -22
- package/package.json +2 -2
- package/src/common.h +299 -0
- package/src/crc.cc +95 -0
- package/src/crc.h +23 -0
- package/src/crc_arm.cc +175 -0
- package/src/crc_common.h +4 -0
- package/{crc_folding.c → src/crc_folding.cc} +175 -185
- package/src/decoder.cc +61 -0
- package/src/decoder.h +53 -0
- package/src/decoder_avx.cc +18 -0
- package/src/decoder_avx2.cc +18 -0
- package/src/decoder_avx2_base.h +615 -0
- package/src/decoder_common.h +512 -0
- package/src/decoder_neon.cc +474 -0
- package/src/decoder_neon64.cc +451 -0
- package/src/decoder_sse2.cc +16 -0
- package/src/decoder_sse_base.h +711 -0
- package/src/decoder_ssse3.cc +18 -0
- package/src/encoder.cc +170 -0
- package/src/encoder.h +21 -0
- package/src/encoder_avx.cc +16 -0
- package/src/encoder_avx2.cc +16 -0
- package/src/encoder_avx_base.h +564 -0
- package/src/encoder_common.h +109 -0
- package/src/encoder_neon.cc +547 -0
- package/src/encoder_sse2.cc +13 -0
- package/src/encoder_sse_base.h +724 -0
- package/src/encoder_ssse3.cc +18 -0
- package/src/hedley.h +1899 -0
- package/src/platform.cc +147 -0
- package/src/yencode.cc +449 -0
- package/test/_maxsize.js +9 -0
- package/test/_speedbase.js +147 -0
- package/test/speedcrc.js +20 -0
- package/test/speeddec.js +92 -0
- package/test/speedenc.js +44 -0
- package/{testcrc.js → test/testcrc.js} +53 -39
- package/test/testdec.js +183 -0
- package/test/testenc.js +163 -0
- package/test/testpostdec.js +126 -0
- package/test.js +0 -91
- package/yencode.cc +0 -1622
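
The bulk of this release replaces the single yencode.cc with per-ISA encoder and decoder sources under src/. The first hunk below (564 added lines, which by its size corresponds to src/encoder_avx_base.h in the list above) is the AVX2/AVX-512 encoder kernel. All of the encoder kernels produce the same yEnc output; as orientation, a plain scalar version of that transform looks roughly like the following sketch (illustrative only, the names here are not taken from the package):

```cpp
// Minimal scalar yEnc encoder, approximating what the package's scalar fallback
// (do_encode_generic) produces. Function and variable names are invented.
#include <cstdint>
#include <cstddef>
#include <vector>

static bool is_critical(uint8_t out) {
	// output bytes that must always be escaped
	return out == 0x00 || out == 0x0A || out == 0x0D || out == '=';
}

std::vector<uint8_t> yenc_encode_simple(const uint8_t* src, size_t len, int line_size = 128) {
	std::vector<uint8_t> out;
	int col = 0; // characters already written on the current line
	for(size_t n = 0; n < len; n++) {
		uint8_t c = (uint8_t)(src[n] + 42);                  // the basic yEnc offset
		bool first = (col == 0), last = (col >= line_size - 1);
		bool escape = is_critical(c)
			|| ((first || last) && (c == '\t' || c == ' '))  // whitespace can't start/end a line
			|| (first && c == '.');                          // leading dot is also escaped
		if(escape) {
			out.push_back('=');
			out.push_back((uint8_t)(c + 64));
			col += 2;
		} else {
			out.push_back(c);
			col++;
		}
		if(col >= line_size && n + 1 < len) {                // wrap the line
			out.push_back('\r');
			out.push_back('\n');
			col = 0;
		}
	}
	return out;
}
```

The exact escaping rules the package uses are the escapeLUT/escapedLUT tables in the src/encoder_common.h hunk at the end of this diff: NUL, LF, CR and '=' are always escaped, while TAB, SPACE and '.' are only escaped at sensitive line positions.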
@@ -0,0 +1,564 @@
+// can't seem to make this worth it
+#include "common.h"
+#ifdef __AVX2__
+
+#include "encoder.h"
+#include "encoder_common.h"
+#define YMM_SIZE 32
+
+#if defined(__GNUC__) && __GNUC__ >= 7
+# define KLOAD32(a, offs) _load_mask32((__mmask32*)(a) + (offs))
+#else
+# define KLOAD32(a, offs) (((uint32_t*)(a))[(offs)])
+#endif
+
+#pragma pack(16)
+static struct {
+	uint32_t eolLastChar[256];
+	/*align32*/ __m256i shufExpand[65536]; // huge 2MB table
+	/*align32*/ int8_t expandMergemix[33*2*32]; // not used in AVX3
+} * HEDLEY_RESTRICT lookupsAVX2;
+static struct {
+	uint32_t eolLastChar[256];
+	uint32_t expand[65536]; // biggish 256KB table (but still smaller than the 2MB table)
+} * HEDLEY_RESTRICT lookupsVBMI2;
+#pragma pack()
+
+static inline void fill_eolLastChar(uint32_t* table) {
+	for(int n=0; n<256; n++) {
+		table[n] = ((n == 214+'\t' || n == 214+' ' || n == 214+'\0' || n == 214+'\n' || n == 214+'\r' || n == '='-42) ? (((n+42+64)&0xff)<<8)+0x0a0d003d : ((n+42)&0xff)+0x0a0d00);
+	}
+}
+
+template<enum YEncDecIsaLevel use_isa>
+static void encoder_avx2_lut() {
+	if(use_isa >= ISA_LEVEL_VBMI2) {
+		ALIGN_ALLOC(lookupsVBMI2, sizeof(*lookupsVBMI2), 32);
+		fill_eolLastChar(lookupsVBMI2->eolLastChar);
+		for(int i=0; i<65536; i++) {
+			int k = i;
+			uint32_t expand = 0;
+			int p = 0;
+			for(int j=0; j<16; j++) {
+				if(k & 1) {
+					p++;
+				}
+				expand |= 1<<(j+p);
+				k >>= 1;
+			}
+			lookupsVBMI2->expand[i] = expand;
+		}
+	} else {
+		ALIGN_ALLOC(lookupsAVX2, sizeof(*lookupsAVX2), 32);
+		fill_eolLastChar(lookupsAVX2->eolLastChar);
+		for(int i=0; i<65536; i++) {
+			int k = i;
+			uint8_t* res = (uint8_t*)(lookupsAVX2->shufExpand + i);
+			int p = 0;
+			for(int j=0; j<16; j++) {
+				if(k & 1) {
+					res[j+p] = 0xff;
+					p++;
+				}
+				res[j+p] = j;
+				k >>= 1;
+			}
+			for(; p<16; p++)
+				res[16+p] = 0x40; // arbitrary value (top bit cannot be set)
+		}
+		for(int i=0; i<33; i++) {
+			int n = (i == 32 ? 32 : 31-i);
+			for(int j=0; j<32; j++) {
+				lookupsAVX2->expandMergemix[i*64 + j] = (n>=j ? -1 : 0);
+				lookupsAVX2->expandMergemix[i*64 + j + 32] = ('='*(n==j) + 64*(n==j-1) + 42*(n!=j));
+			}
+		}
+	}
+}
+
+template<enum YEncDecIsaLevel use_isa>
+HEDLEY_ALWAYS_INLINE void do_encode_avx2(int line_size, int* colOffset, const uint8_t* HEDLEY_RESTRICT srcEnd, uint8_t* HEDLEY_RESTRICT& dest, size_t& len) {
+	// offset position to enable simpler loop condition checking
+	const int INPUT_OFFSET = YMM_SIZE*4 + 1 -1; // -1 to change <= to <
+	if(len <= INPUT_OFFSET || line_size < 16) return;
+
+	uint8_t *p = dest; // destination pointer
+	intptr_t i = -(intptr_t)len; // input position
+	intptr_t lineSizeOffset = -line_size +1; // -1 because we want to stop one char before the end to handle the last char differently
+	intptr_t col = *colOffset + lineSizeOffset;
+
+	i += INPUT_OFFSET;
+	const uint8_t* es = srcEnd - INPUT_OFFSET;
+
+#if !defined(__tune_bdver4__) && !defined(__tune_znver1__)
+	// always process at least one byte to prevent underflow when doing a read with -1 offset
+	if(col < 0 && col != -line_size+1) {
+		// not the first/last character of a line
+		uint8_t c = es[i++];
+		if(HEDLEY_UNLIKELY(c == 214 || c == '\n'+214 || c == '\r'+214 || c == '='-42)) {
+			*(uint16_t*)p = 0x6a3d + (((uint16_t)c) << 8);
+			p += 2;
+			col += 2;
+		} else {
+			*p++ = c+42;
+			col++;
+		}
+	}
+#endif
+
+	if(HEDLEY_UNLIKELY(col >= 0)) {
+		uint8_t c = es[i++];
+		if(col == 0) {
+			// last char
+			uint32_t eolChar = (use_isa >= ISA_LEVEL_VBMI2 ? lookupsVBMI2->eolLastChar[c] : lookupsAVX2->eolLastChar[c]);
+			*(uint32_t*)p = eolChar;
+			p += 3 + (uintptr_t)(eolChar>>27);
+			col = -line_size+1;
+		} else {
+			// line overflowed, insert a newline
+			if (LIKELIHOOD(0.0273, escapedLUT[c]!=0)) {
+				*(uint32_t*)p = UINT32_16_PACK(UINT16_PACK('\r', '\n'), (uint32_t)escapedLUT[c]);
+				p += 4;
+				col = 2-line_size + 1;
+			} else {
+				*(uint32_t*)p = UINT32_PACK('\r', '\n', (uint32_t)(c+42), 0);
+				p += 3;
+				col = 2-line_size;
+			}
+		}
+	}
+	if (HEDLEY_LIKELY(col == -line_size+1)) {
+		// first char of the line
+		uint8_t c = es[i++];
+		if (LIKELIHOOD(0.0273, escapedLUT[c] != 0)) {
+			*(uint16_t*)p = escapedLUT[c];
+			p += 2;
+			col += 2;
+		} else {
+			*(p++) = c + 42;
+			col += 1;
+		}
+	}
+	do {
+		__m256i dataA = _mm256_loadu_si256((__m256i *)(es + i));
+		__m256i dataB = _mm256_loadu_si256((__m256i *)(es + i) + 1);
+		i += YMM_SIZE*2;
+		// search for special chars
+		__m256i cmpA = _mm256_cmpeq_epi8(
+			_mm256_shuffle_epi8(_mm256_set_epi8(
+				'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42,
+				'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42
+			), _mm256_abs_epi8(dataA)),
+			dataA
+		);
+		__m256i cmpB = _mm256_cmpeq_epi8(
+			_mm256_shuffle_epi8(_mm256_set_epi8(
+				'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42,
+				'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42
+			), _mm256_abs_epi8(dataB)),
+			dataB
+		);
+
+#if defined(__AVX512VL__)
+		if(use_isa >= ISA_LEVEL_AVX3) {
+			dataA = _mm256_add_epi8(dataA, _mm256_set1_epi8(42));
+			dataA = _mm256_ternarylogic_epi32(dataA, cmpA, _mm256_set1_epi8(64), 0xf8); // data | (cmp & 64)
+			dataB = _mm256_add_epi8(dataB, _mm256_set1_epi8(42));
+			dataB = _mm256_ternarylogic_epi32(dataB, cmpB, _mm256_set1_epi8(64), 0xf8); // data | (cmp & 64)
+		}
+#endif
+
+		uint32_t maskA = (uint32_t)_mm256_movemask_epi8(cmpA);
+		uint32_t maskB = (uint32_t)_mm256_movemask_epi8(cmpB);
+		unsigned int maskBitsA = popcnt32(maskA);
+		unsigned int maskBitsB = popcnt32(maskB);
+		unsigned int outputBytesA = maskBitsA + YMM_SIZE;
+		unsigned int bitIndexA, bitIndexB;
+		if (LIKELIHOOD(0.170, (maskBitsA|maskBitsB) > 1)) {
+			_encode_loop_branch_slow:
+			unsigned int m1 = maskA & 0xffff, m3 = maskB & 0xffff;
+			unsigned int m2, m4;
+			__m256i data1A, data2A;
+			__m256i data1B, data2B;
+			__m256i shuf1A, shuf1B; // not set in VBMI2 path
+			__m256i shuf2A, shuf2B; // not set in VBMI2 path
+
+#if defined(__AVX512VBMI2__) && defined(__AVX512VL__) && defined(__AVX512BW__)
+			if(use_isa >= ISA_LEVEL_VBMI2) {
+				m2 = maskA >> 16;
+				m4 = maskB >> 16;
+
+				/* alternative no-LUT strategy
+				uint64_t expandMaskA = ~_pdep_u64(~maskA, 0x5555555555555555); // expand bits, with bits set
+				expandMaskA = _pext_u64(expandMaskA^0x5555555555555555, expandMaskA);
+				*/
+
+				data1A = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KLOAD32(lookupsVBMI2->expand, m1), dataA);
+				data2A = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KLOAD32(lookupsVBMI2->expand, m2), _mm256_castsi128_si256(
+					_mm256_extracti128_si256(dataA, 1)
+				));
+				data1B = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KLOAD32(lookupsVBMI2->expand, m3), dataB);
+				data2B = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KLOAD32(lookupsVBMI2->expand, m4), _mm256_castsi128_si256(
+					_mm256_extracti128_si256(dataB, 1)
+				));
+			} else
+#endif
+			{
+				if(use_isa < ISA_LEVEL_AVX3) {
+					dataA = _mm256_add_epi8(dataA, _mm256_blendv_epi8(_mm256_set1_epi8(42), _mm256_set1_epi8(42+64), cmpA));
+					dataB = _mm256_add_epi8(dataB, _mm256_blendv_epi8(_mm256_set1_epi8(42), _mm256_set1_epi8(42+64), cmpB));
+				}
+
+				m2 = (maskA >> 11) & 0x1fffe0;
+				m4 = (maskB >> 11) & 0x1fffe0;
+
+				// duplicate halves
+				data1A = _mm256_inserti128_si256(dataA, _mm256_castsi256_si128(dataA), 1);
+				data1B = _mm256_inserti128_si256(dataB, _mm256_castsi256_si128(dataB), 1);
+#if defined(__tune_znver2__) || defined(__tune_znver3__)
+				data2A = _mm256_permute2x128_si256(dataA, dataA, 0x11);
+				data2B = _mm256_permute2x128_si256(dataB, dataB, 0x11);
+#else
+				data2A = _mm256_permute4x64_epi64(dataA, 0xee);
+				data2B = _mm256_permute4x64_epi64(dataB, 0xee);
+#endif
+
+				shuf1A = _mm256_load_si256(lookupsAVX2->shufExpand + m1);
+				shuf2A = _mm256_load_si256((__m256i*)((char*)(lookupsAVX2->shufExpand) + m2));
+				shuf1B = _mm256_load_si256(lookupsAVX2->shufExpand + m3);
+				shuf2B = _mm256_load_si256((__m256i*)((char*)(lookupsAVX2->shufExpand) + m4));
+
+				// expand
+				data1A = _mm256_shuffle_epi8(data1A, shuf1A);
+				data2A = _mm256_shuffle_epi8(data2A, shuf2A);
+				data1B = _mm256_shuffle_epi8(data1B, shuf1B);
+				data2B = _mm256_shuffle_epi8(data2B, shuf2B);
+				// add in '='
+				data1A = _mm256_blendv_epi8(data1A, _mm256_set1_epi8('='), shuf1A);
+				data2A = _mm256_blendv_epi8(data2A, _mm256_set1_epi8('='), shuf2A);
+				data1B = _mm256_blendv_epi8(data1B, _mm256_set1_epi8('='), shuf1B);
+				data2B = _mm256_blendv_epi8(data2B, _mm256_set1_epi8('='), shuf2B);
+			}
+
+			unsigned int shuf1Len = popcnt32(m1) + 16;
+			unsigned int shuf3Len = popcnt32(m3) + 16;
+			_mm256_storeu_si256((__m256i*)p, data1A);
+			_mm256_storeu_si256((__m256i*)(p + shuf1Len), data2A);
+			_mm256_storeu_si256((__m256i*)(p + outputBytesA), data1B);
+			_mm256_storeu_si256((__m256i*)(p + outputBytesA + shuf3Len), data2B);
+			unsigned int outputBytes = YMM_SIZE + outputBytesA + maskBitsB;
+			p += outputBytes;
+			col += outputBytes;
+
+			if(col >= 0) {
+				// we overflowed - find correct position to revert back to
+				// this is perhaps sub-optimal on 32-bit, but who still uses that with AVX2?
+				uint64_t eqMask;
+				int shiftAmt = (int)(maskBitsB + YMM_SIZE -1 - col);
+				if(HEDLEY_UNLIKELY(shiftAmt < 0)) {
+					uint32_t eqMask1, eqMask2;
+#if defined(__AVX512VBMI2__) && defined(__AVX512VL__) && defined(__AVX512BW__)
+					if(use_isa >= ISA_LEVEL_VBMI2) {
+						eqMask1 = lookupsVBMI2->expand[m1];
+						eqMask2 = lookupsVBMI2->expand[m2];
+					} else
+#endif
+					{
+						eqMask1 = (uint32_t)_mm256_movemask_epi8(shuf1A);
+						eqMask2 = (uint32_t)_mm256_movemask_epi8(shuf2A);
+					}
+					eqMask = eqMask1 | ((uint64_t)eqMask2 << shuf1Len);
+					if(use_isa < ISA_LEVEL_VBMI2)
+						i += (uintptr_t)maskBitsB;
+					else
+						i -= YMM_SIZE;
+					shiftAmt += outputBytesA;
+				} else {
+					uint32_t eqMask3, eqMask4;
+#if defined(__AVX512VBMI2__) && defined(__AVX512VL__) && defined(__AVX512BW__)
+					if(use_isa >= ISA_LEVEL_VBMI2) {
+						eqMask3 = lookupsVBMI2->expand[m3];
+						eqMask4 = lookupsVBMI2->expand[m4];
+					} else
+#endif
+					{
+						eqMask3 = (uint32_t)_mm256_movemask_epi8(shuf1B);
+						eqMask4 = (uint32_t)_mm256_movemask_epi8(shuf2B);
+					}
+					eqMask = eqMask3 | ((uint64_t)eqMask4 << shuf3Len);
+				}
+
+#if defined(__GNUC__) && defined(PLATFORM_AMD64)
+				if(use_isa >= ISA_LEVEL_VBMI2) {
+					asm(
+						"shrq $1, %[eqMask] \n"
+						"shrq %%cl, %[eqMask] \n"
+						"adcq %[col], %[p] \n"
+						: [eqMask]"+r"(eqMask), [p]"+r"(p)
+						: "c"(shiftAmt), [col]"r"(~col)
+					);
+					i -= _mm_popcnt_u64(eqMask);
+				} else
+#endif
+				{
+					eqMask >>= shiftAmt;
+					unsigned int bitCount;
+#ifdef PLATFORM_AMD64
+					bitCount = (unsigned int)_mm_popcnt_u64(eqMask);
+#else
+					bitCount = popcnt32(eqMask & 0xffffffff) + popcnt32(eqMask >> 32);
+#endif
+#if defined(__AVX512VBMI2__) && defined(__AVX512VL__) && defined(__AVX512BW__)
+					if(use_isa >= ISA_LEVEL_VBMI2) {
+						i -= bitCount;
+						p -= col;
+						if(LIKELIHOOD(0.98, (eqMask & 1) != 1))
+							p--;
+						else
+							i++;
+					} else
+#endif
+					{
+						i += bitCount;
+						unsigned int revert = (unsigned int)(col + (eqMask & 1));
+						p -= revert;
+						i -= revert;
+					}
+				}
+				goto _encode_eol_handle_pre;
+			}
+		} else {
+			//_encode_loop_branch_fast:
+			maskBitsB += YMM_SIZE;
+#if defined(__AVX512VL__) && defined(__AVX512BW__)
+			if(use_isa >= ISA_LEVEL_AVX3) {
+# if defined(__AVX512VBMI2__)
+				if(use_isa >= ISA_LEVEL_VBMI2) {
+					_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataA);
+					dataA = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KNOT32(maskA), dataA);
+					_mm256_storeu_si256((__m256i*)p, dataA);
+					p += outputBytesA;
+
+					_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataB);
+					dataB = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KNOT32(maskB), dataB);
+					_mm256_storeu_si256((__m256i*)p, dataB);
+					p += maskBitsB;
+				} else
+# endif
+				{
+					_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataA);
+					dataA = _mm256_mask_alignr_epi8(dataA, (uint32_t)(-(int32_t)maskA), dataA, _mm256_permute4x64_epi64(dataA, _MM_SHUFFLE(1,0,3,2)), 15);
+					dataA = _mm256_ternarylogic_epi32(dataA, cmpA, _mm256_set1_epi8('='), 0xb8); // (data & ~cmp) | (cmp & '=')
+					_mm256_storeu_si256((__m256i*)p, dataA);
+					p += outputBytesA;
+
+					_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataB);
+					dataB = _mm256_mask_alignr_epi8(dataB, (uint32_t)(-(int32_t)maskB), dataB, _mm256_permute4x64_epi64(dataB, _MM_SHUFFLE(1,0,3,2)), 15);
+					dataB = _mm256_ternarylogic_epi32(dataB, cmpB, _mm256_set1_epi8('='), 0xb8);
+					_mm256_storeu_si256((__m256i*)p, dataB);
+					p += maskBitsB;
+				}
+			} else
+#endif
+			{
+				bitIndexA = _lzcnt_u32(maskA);
+				bitIndexB = _lzcnt_u32(maskB);
+				__m256i mergeMaskA = _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexA*2*YMM_SIZE));
+				__m256i mergeMaskB = _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexB*2*YMM_SIZE));
+
+#if defined(__tune_bdver4__) || defined(__tune_znver1__)
+				// avoid slower 32-byte crossing loads on Zen1
+				__m256i dataAShifted = _mm256_alignr_epi8(
+					dataA,
+					_mm256_inserti128_si256(dataA, _mm256_castsi256_si128(dataA), 1),
+					15
+				);
+				__m256i dataBShifted = _mm256_alignr_epi8(
+					dataB,
+					_mm256_inserti128_si256(dataB, _mm256_castsi256_si128(dataB), 1),
+					15
+				);
+#else
+				__m256i dataAShifted = _mm256_loadu_si256((__m256i *)(es + i - YMM_SIZE*2 - 1));
+				__m256i dataBShifted = _mm256_loadu_si256((__m256i *)(es + i - YMM_SIZE - 1));
+#endif
+				dataA = _mm256_andnot_si256(cmpA, dataA); // clear space for '=' char
+				dataA = _mm256_blendv_epi8(dataAShifted, dataA, mergeMaskA);
+				dataA = _mm256_add_epi8(dataA, _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexA*2*YMM_SIZE) + 1));
+				_mm256_storeu_si256((__m256i*)p, dataA);
+				p[YMM_SIZE] = es[i-1-YMM_SIZE] + 42 + (64 & (maskA>>(YMM_SIZE-1-6)));
+				p += outputBytesA;
+
+				dataB = _mm256_andnot_si256(cmpB, dataB);
+				dataB = _mm256_blendv_epi8(dataBShifted, dataB, mergeMaskB);
+				dataB = _mm256_add_epi8(dataB, _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexB*2*YMM_SIZE) + 1));
+				_mm256_storeu_si256((__m256i*)p, dataB);
+				p[YMM_SIZE] = es[i-1] + 42 + (64 & (maskB>>(YMM_SIZE-1-6)));
+				p += maskBitsB;
+			}
+			col += outputBytesA + maskBitsB;
+
+			if(col >= 0) {
+				_encode_loop_branch_fast_eol:
+				if(HEDLEY_UNLIKELY(col > (intptr_t)maskBitsB)) {
+					if(use_isa >= ISA_LEVEL_AVX3)
+						bitIndexA = _lzcnt_u32(maskA);
+					bitIndexA += 1 + maskBitsB;
+
+					i += maskBitsB - YMM_SIZE;
+					if(HEDLEY_UNLIKELY(col == (intptr_t)bitIndexA)) {
+						// this is an escape character, so line will need to overflow
+						p--;
+					} else {
+						i += (col > (intptr_t)bitIndexA);
+					}
+				} else {
+					if(use_isa >= ISA_LEVEL_AVX3)
+						bitIndexB = _lzcnt_u32(maskB);
+					bitIndexB++;
+
+					if(HEDLEY_UNLIKELY(col == (intptr_t)bitIndexB)) {
+						p--;
+					} else {
+						i += (col > (intptr_t)bitIndexB);
+					}
+				}
+				i -= col;
+				p -= col;
+
+				_encode_eol_handle_pre:
+				uint32_t eolChar = (use_isa >= ISA_LEVEL_VBMI2 ? lookupsVBMI2->eolLastChar[es[i]] : lookupsAVX2->eolLastChar[es[i]]);
+				*(uint32_t*)p = eolChar;
+				p += 3 + (uintptr_t)(eolChar>>27);
+				col = lineSizeOffset;
+
+				if(HEDLEY_UNLIKELY(i >= 0)) { // this isn't really a proper check - it's only needed to support short lines; basically, if the line is too short, `i` never gets checked, so we need one somewhere
+					i++;
+					break;
+				}
+
+				dataA = _mm256_loadu_si256((__m256i *)(es + i + 1));
+				dataB = _mm256_loadu_si256((__m256i *)(es + i + 1) + 1);
+				i += YMM_SIZE*2 + 1;
+				// search for special chars
+				cmpA = _mm256_cmpeq_epi8(
+					_mm256_shuffle_epi8(_mm256_set_epi8(
+						'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42,
+						'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42
+					), _mm256_adds_epi8(
+						_mm256_abs_epi8(dataA), _mm256_set_epi64x(0, 0, 0, 88)
+					)),
+					dataA
+				);
+				cmpB = _mm256_cmpeq_epi8(
+					_mm256_shuffle_epi8(_mm256_set_epi8(
+						'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42,
+						'\0'-42,-42,'\r'-42,'.'-42,'='-42,'\0'-42,'\t'-42,'\n'-42,-42,-42,'\r'-42,-42,'='-42,' '-42,-42,'\n'-42
+					), _mm256_abs_epi8(dataB)),
+					dataB
+				);
+
+				// duplicate some code from above to reduce jumping a little
+#if defined(__AVX512VL__)
+				if(use_isa >= ISA_LEVEL_AVX3) {
+					dataA = _mm256_add_epi8(dataA, _mm256_set1_epi8(42));
+					dataA = _mm256_ternarylogic_epi32(dataA, cmpA, _mm256_set1_epi8(64), 0xf8); // data | (cmp & 64)
+					dataB = _mm256_add_epi8(dataB, _mm256_set1_epi8(42));
+					dataB = _mm256_ternarylogic_epi32(dataB, cmpB, _mm256_set1_epi8(64), 0xf8); // data | (cmp & 64)
+				}
+#endif
+
+				maskA = (uint32_t)_mm256_movemask_epi8(cmpA);
+				maskB = (uint32_t)_mm256_movemask_epi8(cmpB);
+				maskBitsA = popcnt32(maskA);
+				maskBitsB = popcnt32(maskB);
+				outputBytesA = maskBitsA + YMM_SIZE;
+				if (LIKELIHOOD(0.170, (maskBitsA|maskBitsB) > 1))
+					goto _encode_loop_branch_slow;
+
+
+				//goto _encode_loop_branch_fast;
+				// duplicating the code, instead of using the goto above, seems to fix a performance regression in GCC
+				maskBitsB += YMM_SIZE;
+#if defined(__AVX512VL__) && defined(__AVX512BW__)
+				if(use_isa >= ISA_LEVEL_AVX3) {
+# if defined(__AVX512VBMI2__)
+					if(use_isa >= ISA_LEVEL_VBMI2) {
+						_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataA);
+						dataA = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KNOT32(maskA), dataA);
+						_mm256_storeu_si256((__m256i*)p, dataA);
+						p += outputBytesA;
+
+						_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataB);
+						dataB = _mm256_mask_expand_epi8(_mm256_set1_epi8('='), KNOT32(maskB), dataB);
+						_mm256_storeu_si256((__m256i*)p, dataB);
+						p += maskBitsB;
+					} else
+# endif
+					{
+						_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataA);
+						dataA = _mm256_mask_alignr_epi8(dataA, (uint32_t)(-(int32_t)maskA), dataA, _mm256_permute4x64_epi64(dataA, _MM_SHUFFLE(1,0,3,2)), 15);
+						dataA = _mm256_ternarylogic_epi32(dataA, cmpA, _mm256_set1_epi8('='), 0xb8); // (data & ~cmp) | (cmp & '=')
+						_mm256_storeu_si256((__m256i*)p, dataA);
+						p += outputBytesA;
+
+						_mm256_mask_storeu_epi8(p+1, 1UL<<31, dataB);
+						dataB = _mm256_mask_alignr_epi8(dataB, (uint32_t)(-(int32_t)maskB), dataB, _mm256_permute4x64_epi64(dataB, _MM_SHUFFLE(1,0,3,2)), 15);
+						dataB = _mm256_ternarylogic_epi32(dataB, cmpB, _mm256_set1_epi8('='), 0xb8);
+						_mm256_storeu_si256((__m256i*)p, dataB);
+						p += maskBitsB;
+					}
+				} else
+#endif
+				{
+					bitIndexA = _lzcnt_u32(maskA);
+					bitIndexB = _lzcnt_u32(maskB);
+					__m256i mergeMaskA = _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexA*2*YMM_SIZE));
+					__m256i mergeMaskB = _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexB*2*YMM_SIZE));
+
+#if defined(__tune_bdver4__) || defined(__tune_znver1__)
+					// avoid slower 32-byte crossing loads on Zen1
+					__m256i dataAShifted = _mm256_alignr_epi8(
+						dataA,
+						_mm256_inserti128_si256(dataA, _mm256_castsi256_si128(dataA), 1),
+						15
+					);
+					__m256i dataBShifted = _mm256_alignr_epi8(
+						dataB,
+						_mm256_inserti128_si256(dataB, _mm256_castsi256_si128(dataB), 1),
+						15
+					);
+#else
+					__m256i dataAShifted = _mm256_loadu_si256((__m256i *)(es + i - YMM_SIZE*2 - 1));
+					__m256i dataBShifted = _mm256_loadu_si256((__m256i *)(es + i - YMM_SIZE - 1));
+#endif
+					dataA = _mm256_andnot_si256(cmpA, dataA); // clear space for '=' char
+					dataA = _mm256_blendv_epi8(dataAShifted, dataA, mergeMaskA);
+					dataA = _mm256_add_epi8(dataA, _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexA*2*YMM_SIZE) + 1));
+					_mm256_storeu_si256((__m256i*)p, dataA);
+					p[YMM_SIZE] = es[i-1-YMM_SIZE] + 42 + (64 & (maskA>>(YMM_SIZE-1-6)));
+					p += outputBytesA;
+
+					dataB = _mm256_andnot_si256(cmpB, dataB);
+					dataB = _mm256_blendv_epi8(dataBShifted, dataB, mergeMaskB);
+					dataB = _mm256_add_epi8(dataB, _mm256_load_si256((const __m256i*)(lookupsAVX2->expandMergemix + bitIndexB*2*YMM_SIZE) + 1));
+					_mm256_storeu_si256((__m256i*)p, dataB);
+					p[YMM_SIZE] = es[i-1] + 42 + (64 & (maskB>>(YMM_SIZE-1-6)));
+					p += maskBitsB;
+				}
+				col += outputBytesA + maskBitsB;
+
+				if(col >= 0)
+					goto _encode_loop_branch_fast_eol;
+			}
+		}
+	} while(i < 0);
+
+	_mm256_zeroupper();
+
+	*colOffset = (int)(col + line_size -1);
+	dest = p;
+	len = -(i - INPUT_OFFSET);
+}
+
+#endif
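
encoder_avx2_lut() above precomputes two 65536-entry tables keyed by a 16-bit "needs escaping" mask: shufExpand holds byte-shuffle patterns for the _mm256_shuffle_epi8 path, and expand holds the bit-masks that the AVX-512 VBMI2 path feeds to _mm256_mask_expand_epi8. The following scalar model shows what one expand entry means; the inner loop mirrors the one above, everything else (names, the example mask) is illustrative:

```cpp
// Scalar model of a lookupsVBMI2->expand entry: for a 16-bit escape mask, bit
// (j+p) of the result marks the output slot that receives source byte j, and
// the cleared bits are the slots that receive the inserted '=' characters.
#include <cstdint>
#include <cstdio>

static uint32_t expand_mask(uint16_t escape_mask) {
	uint32_t expand = 0;
	int p = 0;                      // '=' bytes inserted so far
	int k = escape_mask;
	for(int j = 0; j < 16; j++) {
		if(k & 1) p++;              // escaped byte: leave a slot open for '='
		expand |= 1u << (j + p);    // slot holding source byte j
		k >>= 1;
	}
	return expand;
}

int main() {
	uint16_t m = 0x0009;            // example: bytes 0 and 3 need escaping
	uint32_t e = expand_mask(m);
	// Emulate _mm256_mask_expand_epi8(set1('='), e, data) for 16 input bytes:
	char data[16] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'};
	char out[32]; int next = 0;
	for(int slot = 0; slot < 16 + 2 /* popcount(m) */; slot++)
		out[slot] = ((e >> slot) & 1) ? data[next++] : '=';
	printf("expand=%08x out=%.18s\n", e, out); // prints "=abc=defghijklmnop"
	return 0;
}
```

The second hunk below (109 added lines, matching src/encoder_common.h in the list above) contains the scalar lookup tables and the do_encode_simd wrapper that every SIMD kernel is run through.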
@@ -0,0 +1,109 @@
+#ifndef __YENC_ENCODER_COMMON
+#define __YENC_ENCODER_COMMON
+
+// lookup tables for scalar processing
+#define _B1(n) _B(n), _B(n+1), _B(n+2), _B(n+3)
+#define _B2(n) _B1(n), _B1(n+4), _B1(n+8), _B1(n+12)
+#define _B3(n) _B2(n), _B2(n+16), _B2(n+32), _B2(n+48)
+#define _BX _B3(0), _B3(64), _B3(128), _B3(192)
+
+static const unsigned char escapeLUT[256] = { // whether or not the character is critical
+#define _B(n) ((n == 214 || n == '\r'+214 || n == '\n'+214 || n == '='-42) ? 0 : (n+42) & 0xff)
+	_BX
+#undef _B
+};
+static const uint16_t escapedLUT[256] = { // escaped sequences for characters that need escaping
+#define _B(n) ((n == 214 || n == 214+'\r' || n == 214+'\n' || n == '='-42 || n == 214+'\t' || n == 214+' ' || n == '.'-42) ? UINT16_PACK('=', ((n+42+64)&0xff)) : 0)
+	_BX
+#undef _B
+};
+
+#undef _B1
+#undef _B2
+#undef _B3
+#undef _BX
+
+
+size_t do_encode_generic(int line_size, int* colOffset, const unsigned char* HEDLEY_RESTRICT src, unsigned char* HEDLEY_RESTRICT dest, size_t len, int doEnd);
+
+template<void(&kernel)(int, int*, const uint8_t* HEDLEY_RESTRICT, uint8_t* HEDLEY_RESTRICT&, size_t&)>
+static size_t do_encode_simd(int line_size, int* colOffset, const uint8_t* HEDLEY_RESTRICT src, uint8_t* HEDLEY_RESTRICT dest, size_t len, int doEnd) {
+	if(len < 1) return 0;
+	if(line_size < 12) { // short lines probably not worth processing in a SIMD way
+		// we assume at least the first and last char exist in the line, and since the first char could be escaped, and SIMD encoder assumes at least one non-first/last char, assumption means that line size has to be >= 4
+		return do_encode_generic(line_size, colOffset, src, dest, len, doEnd);
+	}
+
+	const uint8_t* es = src + len;
+	uint8_t* p = dest;
+
+	if(*colOffset < 0) *colOffset = 0; // sanity check
+
+	kernel(line_size, colOffset, es, p, len);
+
+	// scalar loop to process remaining
+	long i = -(long)len;
+	if(*colOffset == 0 && i < 0) {
+		uint8_t c = es[i++];
+		if (LIKELIHOOD(0.0273, escapedLUT[c] != 0)) {
+			memcpy(p, escapedLUT + c, 2);
+			p += 2;
+			*colOffset = 2;
+		} else {
+			*(p++) = c + 42;
+			*colOffset = 1;
+		}
+	}
+	while(i < 0) {
+		uint8_t c = es[i++];
+		if(*colOffset < line_size-1) {
+			if(!escapeLUT[c]) {
+				p[0] = '=';
+				p[1] = c+42+64;
+				p += 2;
+				(*colOffset) += 2;
+			} else {
+				*(p++) = escapeLUT[c];
+				(*colOffset) += 1;
+			}
+		} else {
+			if(*colOffset < line_size) {
+				if (escapedLUT[c] && c != '.'-42) {
+					memcpy(p, escapedLUT + c, 2);
+					p += 2;
+				} else {
+					*(p++) = c + 42;
+				}
+				if(i == 0) break;
+				c = es[i++];
+			}
+
+			// handle EOL
+			if (escapedLUT[c]) {
+				uint32_t w = UINT32_16_PACK(UINT16_PACK('\r', '\n'), (uint32_t)escapedLUT[c]);
+				memcpy(p, &w, sizeof(w));
+				p += 4;
+				*colOffset = 2;
+			} else {
+				uint32_t w = UINT32_PACK('\r', '\n', (uint32_t)(c+42), 0);
+				memcpy(p, &w, sizeof(w));
+				p += 3;
+				*colOffset = 1;
+			}
+		}
+	}
+
+	if(doEnd) {
+		// special case: if the last character is a space/tab, it needs to be escaped as it's the final character on the line
+		unsigned char lc = *(p-1);
+		if(lc == '\t' || lc == ' ') {
+			p[-1] = '=';
+			*p = lc+64;
+			p++;
+			(*colOffset)++;
+		}
+	}
+	return p - dest;
+}
+
+#endif /* __YENC_ENCODER_COMMON */
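
Both the scalar end-of-line handling in do_encode_simd above and the SIMD kernel in the first hunk finish a line with one packed 32-bit store and then advance the output pointer by 3 or 4 bytes, depending on whether the last character of the line had to be escaped. The scalar loop does this explicitly (p += 3 or p += 4), while do_encode_avx2 bakes the decision into each eolLastChar entry and recovers it with a shift by 27. A self-contained illustration of that entry layout; the table expression is copied from fill_eolLastChar() in the first hunk, the rest is mine:

```cpp
// Little-endian layout of a fill_eolLastChar() entry:
//   plain byte:   [c+42] [0x0D] [0x0A] [0x00]      -> store 4 bytes, advance 3
//   escaped byte: ['=']  [c+42+64] [0x0D] [0x0A]   -> store 4 bytes, advance 4
// "advance = 3 + (entry >> 27)" works because only the escaped form has bit 27
// set (0x0A0D003D is above 1<<27, while 0x000A0Dxx is below it).
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
	uint32_t table[256];
	for(int n = 0; n < 256; n++) { // same expression as fill_eolLastChar()
		table[n] = ((n == 214+'\t' || n == 214+' ' || n == 214+'\0' || n == 214+'\n' ||
		             n == 214+'\r' || n == '='-42)
			? (((n+42+64)&0xff)<<8)+0x0a0d003d
			: ((n+42)&0xff)+0x0a0d00);
	}
	for(int n : {(int)'h', 214+' '}) {   // a normal byte, and one that needs escaping
		uint32_t e = table[(uint8_t)n];
		char buf[5] = {0};
		memcpy(buf, &e, 4);              // what the encoder stores at the line end
		printf("byte %3d -> \"%.4s\" advance %u\n", n, buf, 3 + (e >> 27));
	}
	return 0;
}
```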