@exodus/bytes 1.0.0-rc.8 → 1.0.0
- package/LICENSE +1 -1
- package/README.md +286 -4
- package/array.d.ts +24 -0
- package/base58.js +16 -8
- package/base64.d.ts +76 -0
- package/bigint.js +14 -0
- package/encoding-lite.js +7 -0
- package/encoding.js +12 -0
- package/fallback/_utils.js +100 -10
- package/fallback/encoding.js +290 -0
- package/fallback/encoding.labels.js +46 -0
- package/fallback/encoding.util.js +34 -0
- package/fallback/hex.js +2 -70
- package/fallback/latin1.js +2 -1
- package/fallback/multi-byte.encodings.cjs +1 -0
- package/fallback/multi-byte.encodings.json +545 -0
- package/fallback/multi-byte.js +448 -0
- package/fallback/multi-byte.table.js +114 -0
- package/fallback/single-byte.encodings.js +61 -0
- package/fallback/single-byte.js +86 -0
- package/fallback/utf16.js +180 -0
- package/hex.d.ts +22 -0
- package/hex.node.js +2 -0
- package/multi-byte.js +13 -0
- package/multi-byte.node.js +25 -0
- package/package.json +62 -13
- package/single-byte.js +54 -0
- package/single-byte.node.js +62 -0
- package/utf16.js +73 -0
- package/utf16.node.js +79 -0
- package/utf8.d.ts +42 -0
- package/utf8.js +7 -9
- package/utf8.node.js +8 -5
package/fallback/encoding.js
ADDED
@@ -0,0 +1,290 @@
+// We can't return native TextDecoder if it's present, as Node.js one is broken on windows-1252 and we fix that
+// We are also faster than Node.js built-in on both TextEncoder and TextDecoder
+
+import { utf16toString, utf16toStringLoose } from '@exodus/bytes/utf16.js'
+import { utf8fromStringLoose, utf8toString, utf8toStringLoose } from '@exodus/bytes/utf8.js'
+import { createSinglebyteDecoder } from '@exodus/bytes/single-byte.js'
+import labels from './encoding.labels.js'
+import { unfinishedBytes } from './encoding.util.js'
+
+const E_OPTIONS = 'The "options" argument must be of type object'
+const E_ENCODING = 'Unknown encoding'
+const replacementChar = '\uFFFD'
+
+const E_MULTI =
+  'Legacy multi-byte encodings are disabled in /encoding-lite.js, use /encoding.js for full encodings range support'
+const multibyteSet = new Set(['big5', 'euc-kr', 'euc-jp', 'iso-2022-jp', 'shift_jis', 'gbk', 'gb18030']) // prettier-ignore
+let createMultibyteDecoder
+
+export function setMultibyteDecoder(createDecoder) {
+  createMultibyteDecoder = createDecoder
+}
+
+let labelsMap
+
+// Warning: unlike whatwg-encoding, returns lowercased labels
+// Those are case-insensitive and that's how TextDecoder encoding getter normalizes them
+// https://encoding.spec.whatwg.org/#names-and-labels
+export function normalizeEncoding(label) {
+  // fast path
+  if (label === 'utf-8' || label === 'utf8' || label === 'UTF-8' || label === 'UTF8') return 'utf-8'
+  if (label === 'windows-1252' || label === 'ascii' || label === 'latin1') return 'windows-1252'
+  // full map
+  if (/[^\w\t\n\f\r .:-]/i.test(label)) return null // must be ASCII (with ASCII whitespace)
+  const low = `${label}`.trim().toLowerCase()
+  if (Object.hasOwn(labels, low)) return low
+  if (!labelsMap) {
+    labelsMap = new Map()
+    for (const [label, aliases] of Object.entries(labels)) {
+      for (const alias of aliases) labelsMap.set(alias, label)
+    }
+  }
+
+  const mapped = labelsMap.get(low)
+  if (mapped) return mapped
+  return null
+}
+
+const define = (obj, key, value) => Object.defineProperty(obj, key, { value, writable: false })
+
+const fromSource = (x) => {
+  if (x instanceof Uint8Array) return x
+  if (x instanceof ArrayBuffer) return new Uint8Array(x)
+  if (ArrayBuffer.isView(x)) return new Uint8Array(x.buffer, x.byteOffset, x.byteLength)
+  if (globalThis.SharedArrayBuffer && x instanceof SharedArrayBuffer) return new Uint8Array(x)
+  throw new TypeError('Argument must be a SharedArrayBuffer, ArrayBuffer or ArrayBufferView')
+}
+
+function unicodeDecoder(encoding, loose) {
+  if (encoding === 'utf-8') return loose ? utf8toStringLoose : utf8toString // likely
+  const form = encoding === 'utf-16le' ? 'uint8-le' : 'uint8-be'
+  return loose ? (u) => utf16toStringLoose(u, form) : (u) => utf16toString(u, form)
+}
+
+export class TextDecoder {
+  #decode
+  #unicode
+  #multibyte
+  #chunk
+  #canBOM
+
+  constructor(encoding = 'utf-8', options = {}) {
+    if (typeof options !== 'object') throw new TypeError(E_OPTIONS)
+    const enc = normalizeEncoding(encoding)
+    if (!enc || enc === 'replacement') throw new RangeError(E_ENCODING)
+    define(this, 'encoding', enc)
+    define(this, 'fatal', Boolean(options.fatal))
+    define(this, 'ignoreBOM', Boolean(options.ignoreBOM))
+    this.#unicode = enc === 'utf-8' || enc === 'utf-16le' || enc === 'utf-16be'
+    this.#multibyte = !this.#unicode && multibyteSet.has(enc)
+    this.#canBOM = this.#unicode && !this.ignoreBOM
+  }
+
+  get [Symbol.toStringTag]() {
+    return 'TextDecoder'
+  }
+
+  decode(input, options = {}) {
+    if (typeof options !== 'object') throw new TypeError(E_OPTIONS)
+    const stream = Boolean(options.stream)
+    let u = input === undefined ? new Uint8Array() : fromSource(input)
+
+    if (this.#unicode) {
+      let prefix
+      if (this.#chunk) {
+        if (u.length === 0) {
+          if (stream) return '' // no change
+          u = this.#chunk // process as final chunk to handle errors and state changes
+        } else if (u.length < 3) {
+          // No reason to bruteforce offsets, also it's possible this doesn't yet end the sequence
+          const a = new Uint8Array(u.length + this.#chunk.length)
+          a.set(this.#chunk)
+          a.set(u, this.#chunk.length)
+          u = a
+        } else {
+          // Slice off a small portion of u into prefix chunk so we can decode them separately without extending array size
+          const t = new Uint8Array(this.#chunk.length + 3) // We have 1-3 bytes and need 1-3 more bytes
+          t.set(this.#chunk)
+          t.set(u.subarray(0, 3), this.#chunk.length)
+
+          // Stop at the first offset where unfinished bytes reaches 0 or fits into u
+          // If that doesn't happen (u too short), just concat chunk and u completely
+          for (let i = 1; i <= 3; i++) {
+            const unfinished = unfinishedBytes(t, this.#chunk.length + i, this.encoding) // 0-3
+            if (unfinished <= i) {
+              // Always reachable at 3, but we still need 'unfinished' value for it
+              const add = i - unfinished // 0-3
+              prefix = add > 0 ? t.subarray(0, this.#chunk.length + add) : this.#chunk
+              if (add > 0) u = u.subarray(add)
+              break
+            }
+          }
+        }
+
+        this.#chunk = null
+      } else if (u.byteLength === 0) {
+        if (!stream) this.#canBOM = !this.ignoreBOM
+        return ''
+      }
+
+      // For non-stream utf-8 we don't have to do this as it matches utf8toStringLoose already
+      // For non-stream loose utf-16 we still have to do this as this API supports uneven byteLength unlike utf16toStringLoose
+      let suffix = ''
+      if (stream || (!this.fatal && this.encoding !== 'utf-8')) {
+        const trail = unfinishedBytes(u, u.byteLength, this.encoding)
+        if (trail > 0) {
+          if (stream) {
+            this.#chunk = Uint8Array.from(u.subarray(-trail)) // copy
+          } else {
+            // non-fatal mode as already checked
+            suffix = replacementChar
+          }
+
+          u = u.subarray(0, -trail)
+        }
+      }
+
+      if (this.#canBOM) {
+        const bom = this.#findBom(prefix ?? u)
+        if (bom) {
+          if (stream) this.#canBOM = false
+          if (prefix) {
+            prefix = prefix.subarray(bom)
+          } else {
+            u = u.subarray(bom)
+          }
+        }
+      }
+
+      if (!this.#decode) this.#decode = unicodeDecoder(this.encoding, !this.fatal)
+      try {
+        const res = (prefix ? this.#decode(prefix) : '') + this.#decode(u) + suffix
+        if (res.length > 0 && stream) this.#canBOM = false
+
+        if (!stream) this.#canBOM = !this.ignoreBOM
+        return res
+      } catch (err) {
+        this.#chunk = null // reset unfinished chunk on errors
+        throw err
+      }
+
+      // eslint-disable-next-line no-else-return
+    } else if (this.#multibyte) {
+      if (!createMultibyteDecoder) throw new Error(E_MULTI)
+      if (!this.#decode) this.#decode = createMultibyteDecoder(this.encoding, !this.fatal) // can contain state!
+      return this.#decode(u, stream)
+    } else {
+      if (!this.#decode) this.#decode = createSinglebyteDecoder(this.encoding, !this.fatal)
+      return this.#decode(u)
+    }
+  }
+
+  #findBom(u) {
+    switch (this.encoding) {
+      case 'utf-8':
+        return u.byteLength >= 3 && u[0] === 0xef && u[1] === 0xbb && u[2] === 0xbf ? 3 : 0
+      case 'utf-16le':
+        return u.byteLength >= 2 && u[0] === 0xff && u[1] === 0xfe ? 2 : 0
+      case 'utf-16be':
+        return u.byteLength >= 2 && u[0] === 0xfe && u[1] === 0xff ? 2 : 0
+    }
+
+    throw new Error('Unreachable')
+  }
+}
+
+export class TextEncoder {
+  constructor() {
+    define(this, 'encoding', 'utf-8')
+  }
+
+  get [Symbol.toStringTag]() {
+    return 'TextEncoder'
+  }
+
+  encode(str = '') {
+    if (typeof str !== 'string') str = `${str}`
+    const res = utf8fromStringLoose(str)
+    return res.byteOffset === 0 ? res : res.slice(0) // Ensure 0-offset, to match new Uint8Array (per spec), which is non-pooled
+  }
+
+  encodeInto(str, target) {
+    if (typeof str !== 'string') str = `${str}`
+    if (!(target instanceof Uint8Array)) throw new TypeError('Target must be an Uint8Array')
+    if (target.buffer.detached) return { read: 0, written: 0 } // Until https://github.com/whatwg/encoding/issues/324 is resolved
+
+    const tlen = target.length
+    if (tlen < str.length) str = str.slice(0, tlen)
+    let u8 = utf8fromStringLoose(str)
+    let read
+    if (tlen >= u8.length) {
+      read = str.length
+    } else if (u8.length === str.length) {
+      if (u8.length > tlen) u8 = u8.subarray(0, tlen) // ascii can be truncated
+      read = u8.length
+    } else {
+      u8 = u8.subarray(0, tlen)
+      const unfinished = unfinishedBytes(u8, u8.length, 'utf-8')
+      if (unfinished > 0) u8 = u8.subarray(0, u8.length - unfinished)
+
+      // We can do this because loose str -> u8 -> str preserves length, unlike loose u8 -> str -> u8
+      // Each unpaired surrogate (1 charcode) is replaced with a single charcode
+      read = utf8toStringLoose(u8).length // FIXME: Converting back is very inefficient
+    }
+
+    try {
+      target.set(u8)
+    } catch {
+      return { read: 0, written: 0 } // see above, likely detached but no .detached property support
+    }
+
+    return { read, written: u8.length }
+  }
+}
+
+// Warning: unlike whatwg-encoding, returns lowercased labels
+// Those are case-insensitive and that's how TextDecoder encoding getter normalizes them
+export function getBOMEncoding(input) {
+  const u8 = fromSource(input) // asserts
+  if (u8.length >= 3 && u8[0] === 0xef && u8[1] === 0xbb && u8[2] === 0xbf) return 'utf-8'
+  if (u8.length < 2) return null
+  if (u8[0] === 0xff && u8[1] === 0xfe) return 'utf-16le'
+  if (u8[0] === 0xfe && u8[1] === 0xff) return 'utf-16be'
+  return null
+}
+
+// https://encoding.spec.whatwg.org/#decode
+// Warning: encoding sniffed from BOM takes preference over the supplied one
+// Warning: lossy, performs replacement, no option of throwing
+// Expects normalized (lower-case) encoding as input. Completely ignores it and even skips validation when BOM is found
+// Unlike TextDecoder public API, additionally supports 'replacement' encoding
+export function legacyHookDecode(input, fallbackEncoding) {
+  let u8 = fromSource(input)
+  const bomEncoding = getBOMEncoding(u8)
+  if (bomEncoding) u8 = u8.subarray(bomEncoding === 'utf-8' ? 3 : 2)
+  const enc = bomEncoding ?? fallbackEncoding ?? 'utf-8' // "the byte order mark is more authoritative than anything else"
+
+  if (enc === 'utf-8') return utf8toStringLoose(u8)
+  if (enc === 'utf-16le' || enc === 'utf-16be') {
+    let suffix = ''
+    if (u8.byteLength % 2 !== 0) {
+      suffix = replacementChar
+      u8 = u8.subarray(0, -1)
+    }
+
+    return utf16toStringLoose(u8, enc === 'utf-16le' ? 'uint8-le' : 'uint8-be') + suffix
+  }
+
+  if (!Object.hasOwn(labels, enc)) throw new RangeError(E_ENCODING)
+
+  if (multibyteSet.has(enc)) {
+    if (!createMultibyteDecoder) throw new Error(E_MULTI)
+    return createMultibyteDecoder(enc, true)(u8)
+  }
+
+  // https://encoding.spec.whatwg.org/#replacement-decoder
+  // On non-streaming non-fatal case, it just replaces any non-empty input with a single replacement char
+  if (enc === 'replacement') return input.byteLength > 0 ? replacementChar : ''
+
+  return createSinglebyteDecoder(enc, true)(u8)
+}
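For orientation, here is a minimal usage sketch of the TextDecoder/TextEncoder drop-in added above. It only exercises behavior visible in this file; the `@exodus/bytes/encoding.js` import path is an assumption based on the file list and the E_MULTI comment, and is not confirmed by this diff.

import { TextDecoder, TextEncoder, getBOMEncoding } from '@exodus/bytes/encoding.js' // path assumed

const bytes = new TextEncoder().encode('héllo') // Uint8Array [0x68, 0xc3, 0xa9, 0x6c, 0x6c, 0x6f]
const decoder = new TextDecoder('utf-8')
decoder.decode(bytes.subarray(0, 2), { stream: true }) // 'h' (the lone 0xc3 lead byte is buffered in #chunk)
decoder.decode(bytes.subarray(2)) // 'éllo' (the buffered byte is prepended to the next chunk)
getBOMEncoding(new Uint8Array([0xff, 0xfe, 0x41, 0x00])) // 'utf-16le'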
package/fallback/encoding.labels.js
ADDED
@@ -0,0 +1,46 @@
+// See https://encoding.spec.whatwg.org/#names-and-labels
+
+/* eslint-disable @exodus/export-default/named */
+// prettier-ignore
+export default {
+  'utf-8': ['unicode-1-1-utf-8', 'unicode11utf8', 'unicode20utf8', 'utf8', 'x-unicode20utf8'],
+  ibm866: ['866', 'cp866', 'csibm866'],
+  'iso-8859-2': ['csisolatin2', 'iso-ir-101', 'iso8859-2', 'iso88592', 'iso_8859-2', 'iso_8859-2:1987', 'l2', 'latin2'],
+  'iso-8859-3': ['csisolatin3', 'iso-ir-109', 'iso8859-3', 'iso88593', 'iso_8859-3', 'iso_8859-3:1988', 'l3', 'latin3'],
+  'iso-8859-4': ['csisolatin4', 'iso-ir-110', 'iso8859-4', 'iso88594', 'iso_8859-4', 'iso_8859-4:1988', 'l4', 'latin4'],
+  'iso-8859-5': ['csisolatincyrillic', 'cyrillic', 'iso-ir-144', 'iso8859-5', 'iso88595', 'iso_8859-5', 'iso_8859-5:1988'],
+  'iso-8859-6': ['arabic', 'asmo-708', 'csiso88596e', 'csiso88596i', 'csisolatinarabic', 'ecma-114', 'iso-8859-6-e', 'iso-8859-6-i', 'iso-ir-127', 'iso8859-6', 'iso88596', 'iso_8859-6', 'iso_8859-6:1987'],
+  'iso-8859-7': ['csisolatingreek', 'ecma-118', 'elot_928', 'greek', 'greek8', 'iso-ir-126', 'iso8859-7', 'iso88597', 'iso_8859-7', 'iso_8859-7:1987', 'sun_eu_greek'],
+  'iso-8859-8': ['csiso88598e', 'csisolatinhebrew', 'hebrew', 'iso-8859-8-e', 'iso-ir-138', 'iso8859-8', 'iso88598', 'iso_8859-8', 'iso_8859-8:1988', 'visual'],
+  'iso-8859-8-i': ['csiso88598i', 'logical'],
+  'iso-8859-10': ['csisolatin6', 'iso-ir-157', 'iso8859-10', 'iso885910', 'l6', 'latin6'],
+  'iso-8859-13': ['iso8859-13', 'iso885913'],
+  'iso-8859-14': ['iso8859-14', 'iso885914'],
+  'iso-8859-15': ['csisolatin9', 'iso8859-15', 'iso885915', 'iso_8859-15', 'l9'],
+  'iso-8859-16': [],
+  'koi8-r': ['cskoi8r', 'koi', 'koi8', 'koi8_r'],
+  'koi8-u': ['koi8-ru'],
+  macintosh: ['csmacintosh', 'mac', 'x-mac-roman'],
+  'windows-874': ['dos-874', 'iso-8859-11', 'iso8859-11', 'iso885911', 'tis-620'],
+  'windows-1250': ['cp1250', 'x-cp1250'],
+  'windows-1251': ['cp1251', 'x-cp1251'],
+  'windows-1252': ['ansi_x3.4-1968', 'ascii', 'cp1252', 'cp819', 'csisolatin1', 'ibm819', 'iso-8859-1', 'iso-ir-100', 'iso8859-1', 'iso88591', 'iso_8859-1', 'iso_8859-1:1987', 'l1', 'latin1', 'us-ascii', 'x-cp1252'],
+  'windows-1253': ['cp1253', 'x-cp1253'],
+  'windows-1254': ['cp1254', 'csisolatin5', 'iso-8859-9', 'iso-ir-148', 'iso8859-9', 'iso88599', 'iso_8859-9', 'iso_8859-9:1989', 'l5', 'latin5', 'x-cp1254'],
+  'windows-1255': ['cp1255', 'x-cp1255'],
+  'windows-1256': ['cp1256', 'x-cp1256'],
+  'windows-1257': ['cp1257', 'x-cp1257'],
+  'windows-1258': ['cp1258', 'x-cp1258'],
+  'x-mac-cyrillic': ['x-mac-ukrainian'],
+  gbk: ['chinese', 'csgb2312', 'csiso58gb231280', 'gb2312', 'gb_2312', 'gb_2312-80', 'iso-ir-58', 'x-gbk'],
+  gb18030: [],
+  big5: ['big5-hkscs', 'cn-big5', 'csbig5', 'x-x-big5'],
+  'euc-jp': ['cseucpkdfmtjapanese', 'x-euc-jp'],
+  'iso-2022-jp': ['csiso2022jp'],
+  shift_jis: ['csshiftjis', 'ms932', 'ms_kanji', 'shift-jis', 'sjis', 'windows-31j', 'x-sjis'],
+  'euc-kr': ['cseuckr', 'csksc56011987', 'iso-ir-149', 'korean', 'ks_c_5601-1987', 'ks_c_5601-1989', 'ksc5601', 'ksc_5601', 'windows-949'],
+  replacement: ['csiso2022kr', 'hz-gb-2312', 'iso-2022-cn', 'iso-2022-cn-ext', 'iso-2022-kr'],
+  'utf-16be': ['unicodefffe'],
+  'utf-16le': ['csunicode', 'iso-10646-ucs-2', 'ucs-2', 'unicode', 'unicodefeff', 'utf-16'],
+  'x-user-defined': [],
+}
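This label table is what normalizeEncoding in fallback/encoding.js resolves against. A few illustrative lookups, hedged in that the import path is assumed and only the mapping shown above is relied on:

import { normalizeEncoding } from '@exodus/bytes/encoding.js' // path assumed

normalizeEncoding('Latin1') // 'windows-1252' ('latin1' is listed as a windows-1252 label)
normalizeEncoding('UCS-2') // 'utf-16le'
normalizeEncoding('iso-2022-kr') // 'replacement' (the TextDecoder constructor rejects this one)
normalizeEncoding('no-such-encoding') // null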
package/fallback/encoding.util.js
ADDED
@@ -0,0 +1,34 @@
+export function unfinishedBytes(u, len, enc) {
+  switch (enc) {
+    case 'utf-8': {
+      // 0-3
+      let p = 0
+      while (p < 2 && p < len && (u[len - p - 1] & 0xc0) === 0x80) p++ // go back 0-2 trailing bytes
+      if (p === len) return 0 // no space for lead
+      const l = u[len - p - 1]
+      if (l < 0xc2 || l > 0xf4) return 0 // not a lead
+      if (p === 0) return 1 // nothing to recheck, we have only lead, return it. 2-byte must return here
+      if (l < 0xe0 || (l < 0xf0 && p >= 2)) return 0 // 2-byte, or 3-byte or less and we already have 2 trailing
+      const lower = l === 0xf0 ? 0x90 : l === 0xe0 ? 0xa0 : 0x80
+      const upper = l === 0xf4 ? 0x8f : l === 0xed ? 0x9f : 0xbf
+      const n = u[len - p]
+      return n >= lower && n <= upper ? p + 1 : 0
+    }
+
+    case 'utf-16le':
+    case 'utf-16be': {
+      // 0-3
+      let p = 0
+      if (len % 2 !== 0) p++ // uneven bytes
+      const l = len - p - 1
+      if (len - p >= 2) {
+        const last = enc === 'utf-16le' ? (u[l] << 8) ^ u[l - 1] : (u[l - 1] << 8) ^ u[l]
+        if (last >= 0xd8_00 && last < 0xdc_00) p += 2 // lone lead
+      }
+
+      return p
+    }
+  }
+
+  throw new Error('Unsupported encoding')
+}
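A quick illustration of what unfinishedBytes reports; this is how the streaming TextDecoder above decides how many trailing bytes to buffer between decode() calls. The values follow directly from the logic in this file:

import { unfinishedBytes } from './encoding.util.js'

// '€' is 0xe2 0x82 0xac in UTF-8; drop the last byte to simulate a chunk boundary
unfinishedBytes(new Uint8Array([0xe2, 0x82]), 2, 'utf-8') // 2, both bytes belong to an incomplete sequence
unfinishedBytes(new Uint8Array([0xe2, 0x82, 0xac]), 3, 'utf-8') // 0, the sequence is complete
unfinishedBytes(new Uint8Array([0x41, 0x00, 0x3d, 0xd8]), 4, 'utf-16le') // 2, trailing lone lead surrogate 0xd83d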
package/fallback/hex.js
CHANGED
@@ -1,5 +1,5 @@
 import { assertUint8 } from '../assert.js'
-import { nativeDecoder, nativeEncoder,
+import { nativeDecoder, nativeEncoder, decode2string } from './_utils.js'
 import { encodeAscii, decodeAscii } from './latin1.js'
 
 let hexArray // array of 256 bytes converted to two-char hex strings
@@ -11,58 +11,6 @@ const allowed = '0123456789ABCDEFabcdef'
 
 export const E_HEX = 'Input is not a hex string'
 
-function toHexPartAddition(a, start, end) {
-  let o = ''
-  let i = start
-  const h = hexArray
-  for (const last3 = end - 3; i < last3; i += 4) {
-    const x0 = a[i]
-    const x1 = a[i + 1]
-    const x2 = a[i + 2]
-    const x3 = a[i + 3]
-    o += h[x0]
-    o += h[x1]
-    o += h[x2]
-    o += h[x3]
-  }
-
-  while (i < end) o += h[a[i++]]
-  return o
-}
-
-// Optimiziation for Hermes which is the main user of fallback
-function toHexPartTemplates(a, start, end) {
-  let o = ''
-  let i = start
-  const h = hexArray
-  for (const last15 = end - 15; i < last15; i += 16) {
-    const x0 = a[i]
-    const x1 = a[i + 1]
-    const x2 = a[i + 2]
-    const x3 = a[i + 3]
-    const x4 = a[i + 4]
-    const x5 = a[i + 5]
-    const x6 = a[i + 6]
-    const x7 = a[i + 7]
-    const x8 = a[i + 8]
-    const x9 = a[i + 9]
-    const x10 = a[i + 10]
-    const x11 = a[i + 11]
-    const x12 = a[i + 12]
-    const x13 = a[i + 13]
-    const x14 = a[i + 14]
-    const x15 = a[i + 15]
-    o += `${h[x0]}${h[x1]}${h[x2]}${h[x3]}${h[x4]}${h[x5]}${h[x6]}${h[x7]}${h[x8]}${h[x9]}${h[x10]}${h[x11]}${h[x12]}${h[x13]}${h[x14]}${h[x15]}`
-  }
-
-  while (i < end) o += h[a[i++]]
-  return o
-}
-
-// Using templates is significantly faster in Hermes and JSC
-// It's harder to detect JSC and not important anyway as it has native impl, so we detect only Hermes
-const toHexPart = isHermes ? toHexPartTemplates : toHexPartAddition
-
 export function toHex(arr) {
   assertUint8(arr)
 
@@ -100,23 +48,7 @@ export function toHex(arr)
     return decodeAscii(oa)
   }
 
-
-    // Limit concatenation to avoid excessive GC
-    // Thresholds checked on Hermes
-    const concat = []
-    for (let i = 0; i < length; ) {
-      const step = i + 500
-      const end = step > length ? length : step
-      concat.push(toHexPart(arr, i, end))
-      i = end
-    }
-
-    const res = concat.join('')
-    concat.length = 0
-    return res
-  }
-
-  return toHexPart(arr, 0, length)
+  return decode2string(arr, 0, length, hexArray)
 }
 
 export function fromHex(str) {
package/fallback/latin1.js
CHANGED
@@ -4,6 +4,7 @@ import {
   nativeDecoderLatin1,
   nativeBuffer,
   isHermes,
+  isDeno,
 } from './_utils.js'
 
 // See http://stackoverflow.com/a/22747272/680742, which says that lowest limit is in Chrome, with 0xffff args
@@ -64,7 +65,7 @@ export function decodeLatin1(arr, start = 0, stop = arr.length) {
 export const decodeAscii = nativeBuffer
   ? (a) =>
       // Buffer is faster on Node.js (but only for long enough data), if we know that output is ascii
-      a.byteLength >= 0x3_00
+      a.byteLength >= 0x3_00 && !isDeno
         ? nativeBuffer.from(a.buffer, a.byteOffset, a.byteLength).latin1Slice(0, a.byteLength) // .latin1Slice is faster than .asciiSlice
         : nativeDecoder.decode(a) // On Node.js, utf8 decoder is faster than latin1
   : nativeDecoderLatin1
package/fallback/multi-byte.encodings.cjs
ADDED
@@ -0,0 +1 @@
+module.exports = () => require('./multi-byte.encodings.json') // lazy-load