@exodus/bytes 1.0.0-rc.3 → 1.0.0-rc.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -2
- package/assert.js +8 -5
- package/base32.js +33 -0
- package/base58.js +212 -0
- package/base58check.js +30 -0
- package/base64.js +90 -50
- package/fallback/_utils.js +6 -0
- package/fallback/base32.js +198 -0
- package/fallback/base64.js +86 -51
- package/fallback/hex.js +31 -17
- package/fallback/utf8.js +280 -0
- package/hex.js +13 -4
- package/package.json +37 -8
- package/utf8.js +117 -0
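
Note: the fallback modules in this diff now import nativeEncoder / nativeDecoder from the new shared package/fallback/_utils.js (+6 lines, contents not shown in this section). Judging from the inline detection code those imports replace in base64.js and hex.js, a plausible reconstruction of that helper is roughly the following sketch; the actual file is not part of this section:

// Hypothetical reconstruction of fallback/_utils.js, inferred from the removed inline checks below
const { TextEncoder, TextDecoder } = globalThis
export const nativeEncoder = TextEncoder?.toString().includes('[native code]') ? new TextEncoder() : null
export const nativeDecoder = TextDecoder?.toString().includes('[native code]') ? new TextDecoder() : null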
package/fallback/base32.js
ADDED
@@ -0,0 +1,198 @@
+import { assertUint8 } from '../assert.js'
+import { nativeEncoder, nativeDecoder } from './_utils.js'
+
+// See https://datatracker.ietf.org/doc/html/rfc4648
+
+const BASE32 = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'] // RFC 4648, #6
+const BASE32HEX = [...'0123456789ABCDEFGHIJKLMNOPQRSTUV'] // RFC 4648, #7
+const BASE32_HELPERS = {}
+const BASE32HEX_HELPERS = {}
+
+export const E_CHAR = 'Invalid character in base32 input'
+export const E_PADDING = 'Invalid base32 padding'
+export const E_LENGTH = 'Invalid base32 length'
+export const E_LAST = 'Invalid last chunk'
+
+// We construct output by concatenating chars, this seems to be fine enough on modern JS engines
+export function toBase32(arr, isBase32Hex, padding) {
+  assertUint8(arr)
+  const fullChunks = Math.floor(arr.length / 5)
+  const fullChunksBytes = fullChunks * 5
+  let o = ''
+  let i = 0
+
+  const alphabet = isBase32Hex ? BASE32HEX : BASE32
+  const helpers = isBase32Hex ? BASE32HEX_HELPERS : BASE32_HELPERS
+  if (!helpers.pairs) {
+    helpers.pairs = []
+    if (nativeDecoder) {
+      // Lazy to save memory in case if this is not needed
+      helpers.codepairs = new Uint16Array(32 * 32)
+      const u16 = helpers.codepairs
+      const u8 = new Uint8Array(u16.buffer, u16.byteOffset, u16.byteLength) // write as 1-byte to ignore BE/LE difference
+      for (let i = 0; i < 32; i++) {
+        const ic = alphabet[i].charCodeAt(0)
+        for (let j = 0; j < 32; j++) u8[(i << 6) | (j << 1)] = u8[(j << 6) | ((i << 1) + 1)] = ic
+      }
+    } else {
+      const p = helpers.pairs
+      for (let i = 0; i < 32; i++) {
+        for (let j = 0; j < 32; j++) p.push(`${alphabet[i]}${alphabet[j]}`)
+      }
+    }
+  }
+
+  const { pairs, codepairs } = helpers
+
+  // Fast path for complete blocks
+  // This whole loop can be commented out, the algorithm won't change, it's just an optimization of the next loop
+  if (nativeDecoder) {
+    const oa = new Uint16Array(fullChunks * 4)
+    for (let j = 0; i < fullChunksBytes; i += 5) {
+      const a = arr[i]
+      const b = arr[i + 1]
+      const c = arr[i + 2]
+      const d = arr[i + 3]
+      const e = arr[i + 4]
+      oa[j++] = codepairs[(a << 2) | (b >> 6)] // 8 + 8 - 5 - 5 = 6 left
+      oa[j++] = codepairs[((b & 0x3f) << 4) | (c >> 4)] // 6 + 8 - 5 - 5 = 4 left
+      oa[j++] = codepairs[((c & 0xf) << 6) | (d >> 2)] // 4 + 8 - 5 - 5 = 2 left
+      oa[j++] = codepairs[((d & 0x3) << 8) | e] // 2 + 8 - 5 - 5 = 0 left
+    }
+
+    o = nativeDecoder.decode(oa)
+  } else {
+    for (; i < fullChunksBytes; i += 5) {
+      const a = arr[i]
+      const b = arr[i + 1]
+      const c = arr[i + 2]
+      const d = arr[i + 3]
+      const e = arr[i + 4]
+      o += pairs[(a << 2) | (b >> 6)] // 8 + 8 - 5 - 5 = 6 left
+      o += pairs[((b & 0x3f) << 4) | (c >> 4)] // 6 + 8 - 5 - 5 = 4 left
+      o += pairs[((c & 0xf) << 6) | (d >> 2)] // 4 + 8 - 5 - 5 = 2 left
+      o += pairs[((d & 0x3) << 8) | e] // 2 + 8 - 5 - 5 = 0 left
+    }
+  }
+
+  // If we have something left, process it with a full algo
+  let carry = 0
+  let shift = 3 // First byte needs to be shifted by 3 to get 5 bits
+  for (; i < arr.length; i++) {
+    const x = arr[i]
+    o += alphabet[carry | (x >> shift)] // shift >= 3, so this fits
+    if (shift >= 5) {
+      shift -= 5
+      o += alphabet[(x >> shift) & 0x1f]
+    }
+
+    carry = (x << (5 - shift)) & 0x1f
+    shift += 3 // Each byte prints 5 bits and leaves 3 bits
+  }
+
+  if (shift !== 3) o += alphabet[carry] // shift 3 means we have no carry left
+  if (padding) o += ['', '======', '====', '===', '='][arr.length - fullChunksBytes]
+
+  return o
+}
+
+// TODO: can this be optimized? This only affects non-Hermes barebone engines though
+const mapSize = nativeEncoder ? 256 : 65_536 // we have to store 64 KiB map or recheck everything if we can't decode to byte array
+
+export function fromBase32(str, isBase32Hex) {
+  let inputLength = str.length
+  while (str[inputLength - 1] === '=') inputLength--
+  const paddingLength = str.length - inputLength
+  const tailLength = inputLength % 8
+  const mainLength = inputLength - tailLength // multiples of 8
+  if (![0, 2, 4, 5, 7].includes(tailLength)) throw new SyntaxError(E_LENGTH) // fast verification
+  if (paddingLength > 7 || (paddingLength !== 0 && str.length % 8 !== 0)) {
+    throw new SyntaxError(E_PADDING)
+  }
+
+  const alphabet = isBase32Hex ? BASE32HEX : BASE32
+  const helpers = isBase32Hex ? BASE32HEX_HELPERS : BASE32_HELPERS
+
+  if (!helpers.fromMap) {
+    helpers.fromMap = new Int8Array(mapSize).fill(-1) // no regex input validation here, so we map all other bytes to -1 and recheck sign
+    alphabet.forEach((c, i) => {
+      helpers.fromMap[c.charCodeAt(0)] = helpers.fromMap[c.toLowerCase().charCodeAt(0)] = i
+    })
+  }
+
+  const m = helpers.fromMap
+
+  const arr = new Uint8Array(Math.floor((inputLength * 5) / 8))
+  let at = 0
+  let i = 0
+
+  if (nativeEncoder) {
+    const codes = nativeEncoder.encode(str)
+    if (codes.length !== str.length) throw new SyntaxError(E_CHAR) // non-ascii
+    while (i < mainLength) {
+      // each 5 bits, grouped 5 * 4 = 20
+      const a = (m[codes[i++]] << 15) | (m[codes[i++]] << 10) | (m[codes[i++]] << 5) | m[codes[i++]]
+      const b = (m[codes[i++]] << 15) | (m[codes[i++]] << 10) | (m[codes[i++]] << 5) | m[codes[i++]]
+      if (a < 0 || b < 0) throw new SyntaxError(E_CHAR)
+      arr[at++] = a >> 12
+      arr[at++] = (a >> 4) & 0xff
+      arr[at++] = ((a << 4) & 0xff) | (b >> 16)
+      arr[at++] = (b >> 8) & 0xff
+      arr[at++] = b & 0xff
+    }
+  } else {
+    while (i < mainLength) {
+      // each 5 bits, grouped 5 * 4 = 20
+      const a =
+        (m[str.charCodeAt(i++)] << 15) |
+        (m[str.charCodeAt(i++)] << 10) |
+        (m[str.charCodeAt(i++)] << 5) |
+        m[str.charCodeAt(i++)]
+      const b =
+        (m[str.charCodeAt(i++)] << 15) |
+        (m[str.charCodeAt(i++)] << 10) |
+        (m[str.charCodeAt(i++)] << 5) |
+        m[str.charCodeAt(i++)]
+      if (a < 0 || b < 0) throw new SyntaxError(E_CHAR)
+      arr[at++] = a >> 12
+      arr[at++] = (a >> 4) & 0xff
+      arr[at++] = ((a << 4) & 0xff) | (b >> 16)
+      arr[at++] = (b >> 8) & 0xff
+      arr[at++] = b & 0xff
+    }
+  }
+
+  // Last block, valid tailLength: 0 2 4 5 7, checked already
+  // We check last chunk to be strict
+  if (tailLength < 2) return arr
+  const ab = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
+  if (ab < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ab >> 2
+  if (tailLength < 4) {
+    if (ab & 0x3) throw new SyntaxError(E_LAST)
+    return arr
+  }
+
+  const cd = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
+  if (cd < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ((ab << 6) & 0xff) | (cd >> 4)
+  if (tailLength < 5) {
+    if (cd & 0xf) throw new SyntaxError(E_LAST)
+    return arr
+  }
+
+  const e = m[str.charCodeAt(i++)]
+  if (e < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ((cd << 4) & 0xff) | (e >> 1) // 4 + 4
+  if (tailLength < 7) {
+    if (e & 0x1) throw new SyntaxError(E_LAST)
+    return arr
+  }
+
+  const fg = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
+  if (fg < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ((e << 7) & 0xff) | (fg >> 3) // 1 + 5 + 2
+  // Can't be 8, so no h
+  if (fg & 0x7) throw new SyntaxError(E_LAST)
+  return arr
+}
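
The new fallback implements RFC 4648 base32 and base32hex end to end. A minimal round-trip sketch, assuming the fallback module is imported directly (the public entry point package/base32.js is not shown in this section, so the import path here is illustrative):

// Round trip through the new fallback base32 codec (import path assumed for illustration)
import { toBase32, fromBase32 } from './fallback/base32.js'

const bytes = new Uint8Array([0x66, 0x6f, 0x6f]) // "foo"
toBase32(bytes, false, true) // 'MZXW6===' — RFC 4648 test vector, padded
toBase32(bytes, false, false) // 'MZXW6' — same, padding omitted
fromBase32('MZXW6===', false) // Uint8Array [0x66, 0x6f, 0x6f]; lowercase input is accepted too
fromBase32('MZXW7===', false) // throws SyntaxError(E_LAST): non-zero trailing bits in the last chunk

The decoder is strict: length and padding are validated up front, unknown characters map to -1 in the lookup table and are rejected, and the final partial chunk must have its unused bits set to zero.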
package/fallback/base64.js
CHANGED
@@ -1,15 +1,17 @@
 import { assertUint8 } from '../assert.js'
+import { nativeEncoder, nativeDecoder } from './_utils.js'

 // See https://datatracker.ietf.org/doc/html/rfc4648

-const { TextDecoder } = globalThis
-const nativeDecoder = TextDecoder?.toString().includes('[native code]') ? new TextDecoder() : null
 const BASE64 = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/']
 const BASE64URL = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_']
-const
-const
-
-const
+const BASE64_HELPERS = {}
+const BASE64URL_HELPERS = {}
+
+export const E_CHAR = 'Invalid character in base64 input'
+export const E_PADDING = 'Invalid base64 padding'
+export const E_LENGTH = 'Invalid base64 length'
+export const E_LAST = 'Invalid last chunk'

 // Alternatively, we could have mapped 0-255 bytes to charcodes and just used btoa(ascii),
 // but that approach is _slower_ than our toBase64js function, even on Hermes
@@ -23,27 +25,38 @@ export function toBase64(arr, isURL, padding) {
   let i = 0

   const alphabet = isURL ? BASE64URL : BASE64
-  const
-
-
-
-
-
+  const helpers = isURL ? BASE64URL_HELPERS : BASE64_HELPERS
+  if (!helpers.pairs) {
+    helpers.pairs = []
+    if (nativeDecoder) {
+      // Lazy to save memory in case if this is not needed
+      helpers.codepairs = new Uint16Array(64 * 64)
+      const u16 = helpers.codepairs
+      const u8 = new Uint8Array(u16.buffer, u16.byteOffset, u16.byteLength) // write as 1-byte to ignore BE/LE difference
+      for (let i = 0; i < 64; i++) {
+        const ic = alphabet[i].charCodeAt(0)
+        for (let j = 0; j < 64; j++) u8[(i << 7) | (j << 1)] = u8[(j << 7) | ((i << 1) + 1)] = ic
+      }
+    } else {
+      const p = helpers.pairs
+      for (let i = 0; i < 64; i++) {
+        for (let j = 0; j < 64; j++) p.push(`${alphabet[i]}${alphabet[j]}`)
+      }
     }
   }

+  const { pairs, codepairs } = helpers
+
   // Fast path for complete blocks
   // This whole loop can be commented out, the algorithm won't change, it's just an optimization of the next loop
   if (nativeDecoder) {
-    const oa = new
+    const oa = new Uint16Array(fullChunks * 2)
     for (let j = 0; i < fullChunksBytes; i += 3) {
       const a = arr[i]
       const b = arr[i + 1]
       const c = arr[i + 2]
-      oa[j++] =
-      oa[j++] =
-      oa[j++] = map[((b & 0xf) << 2) | (c >> 6)]
-      oa[j++] = map[c & 0x3f]
+      oa[j++] = codepairs[(a << 4) | (b >> 4)]
+      oa[j++] = codepairs[((b & 0x0f) << 8) | c]
     }

     o = nativeDecoder.decode(oa)
@@ -78,50 +91,72 @@ export function toBase64(arr, isURL, padding) {
   return o
 }

-
-
-// Assumes valid input and no chars after =, checked at API
-// Last chunk is rechecked at API too
-export function fromBase64(str) {
-  const map = fromBase64jsMap || new Array(256)
-  if (!fromBase64jsMap) {
-    fromBase64jsMap = map
-    BASE64.forEach((c, i) => (map[c.charCodeAt(0)] = i))
-    map['-'.charCodeAt(0)] = map['+'.charCodeAt(0)] // for base64url
-    map['_'.charCodeAt(0)] = map['/'.charCodeAt(0)] // for base64url
-  }
+// TODO: can this be optimized? This only affects non-Hermes barebone engines though
+const mapSize = nativeEncoder ? 256 : 65_536 // we have to store 64 KiB map or recheck everything if we can't decode to byte array

+// Last chunk is rechecked at API
+export function fromBase64(str, isURL) {
   let inputLength = str.length
   while (str[inputLength - 1] === '=') inputLength--
-
-  const arr = new Uint8Array(Math.floor((inputLength * 3) / 4))
+  const paddingLength = str.length - inputLength
   const tailLength = inputLength % 4
   const mainLength = inputLength - tailLength // multiples of 4
+  if (tailLength === 1) throw new SyntaxError(E_LENGTH)
+  if (paddingLength > 3 || (paddingLength !== 0 && str.length % 4 !== 0)) {
+    throw new SyntaxError(E_PADDING)
+  }
+
+  const alphabet = isURL ? BASE64URL : BASE64
+  const helpers = isURL ? BASE64URL_HELPERS : BASE64_HELPERS

+  if (!helpers.fromMap) {
+    helpers.fromMap = new Int8Array(mapSize).fill(-1) // no regex input validation here, so we map all other bytes to -1 and recheck sign
+    alphabet.forEach((c, i) => (helpers.fromMap[c.charCodeAt(0)] = i))
+  }
+
+  const m = helpers.fromMap
+
+  const arr = new Uint8Array(Math.floor((inputLength * 3) / 4))
   let at = 0
   let i = 0
-
-
-
-
-
-
-
-
-
+
+  if (nativeEncoder) {
+    const codes = nativeEncoder.encode(str)
+    if (codes.length !== str.length) throw new SyntaxError(E_CHAR) // non-ascii
+    while (i < mainLength) {
+      const a = (m[codes[i++]] << 18) | (m[codes[i++]] << 12) | (m[codes[i++]] << 6) | m[codes[i++]]
+      if (a < 0) throw new SyntaxError(E_CHAR)
+      arr[at++] = a >> 16
+      arr[at++] = (a >> 8) & 0xff
+      arr[at++] = a & 0xff
+    }
+  } else {
+    while (i < mainLength) {
+      const a =
+        (m[str.charCodeAt(i++)] << 18) |
+        (m[str.charCodeAt(i++)] << 12) |
+        (m[str.charCodeAt(i++)] << 6) |
+        m[str.charCodeAt(i++)]
+      if (a < 0) throw new SyntaxError(E_CHAR)
+      arr[at++] = a >> 16
+      arr[at++] = (a >> 8) & 0xff
+      arr[at++] = a & 0xff
+    }
   }

-
-
-
-
-
-
-
-
-  tmp = (map[str.charCodeAt(i)] << 2) | (map[str.charCodeAt(i + 1)] >> 4)
-  arr[at++] = tmp & 0xff
+  // Can be 0, 2 or 3, verified by padding checks already
+  if (tailLength < 2) return arr // 0
+  const ab = (m[str.charCodeAt(i++)] << 6) | m[str.charCodeAt(i++)]
+  if (ab < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ab >> 4
+  if (tailLength < 3) {
+    if (ab & 0xf) throw new SyntaxError(E_LAST)
+    return arr // 2
   }

-
+  const c = m[str.charCodeAt(i++)]
+  if (c < 0) throw new SyntaxError(E_CHAR)
+  arr[at++] = ((ab << 4) & 0xff) | (c >> 2)
+  if (c & 0x3) throw new SyntaxError(E_LAST)
+  return arr // 3
 }
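
The rewritten fallback decoder now does its own validation instead of assuming pre-checked input. A small sketch of the behaviour implied by the added checks, again importing the fallback module directly (path assumed for illustration):

// Strict decoding via the reworked fallback (isURL selects the base64url alphabet)
import { toBase64, fromBase64 } from './fallback/base64.js'

toBase64(new Uint8Array([104, 101, 108, 108, 111]), true, false) // 'aGVsbG8' — base64url alphabet, no padding
fromBase64('aGVsbG8=', false) // Uint8Array for "hello"
fromBase64('aGVsbG8', false) // same — padding is optional, but if present it must be consistent
fromBase64('aGVsbG9', false) // throws SyntaxError(E_LAST): non-zero trailing bits in the last chunk
fromBase64('aGVsbG8*', false) // throws SyntaxError(E_CHAR): '*' maps to -1 in the lookup table

Note also that the old shared map aliased '-' and '_' onto '+' and '/', so base64 and base64url input went through one decoder; the new code keys a separate lookup table off isURL, so each call accepts exactly one alphabet.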
package/fallback/hex.js
CHANGED
@@ -1,12 +1,11 @@
-import {
-
-// We use TextEncoder here to parse strings to charcodes, this is faster than individual charCodeAt calls
-const { TextEncoder } = globalThis // Buffer is optional, only used when native
-const nativeEncoder = TextEncoder?.toString().includes('[native code]') ? new TextEncoder() : null
+import { assertUint8 } from '../assert.js'
+import { nativeEncoder } from './_utils.js'

 let hexArray
 let dehexArray

+export const E_HEX = 'Input is not a hex string'
+
 function toHexPart(arr, start, end) {
   let o = ''
   let i = start
@@ -52,15 +51,15 @@ export function toHex(arr) {
   return toHexPart(arr, 0, length)
 }

+// TODO: can this be optimized? This only affects non-Hermes barebone engines though
+const mapSize = nativeEncoder ? 256 : 65_536 // we have to store 64 KiB map or recheck everything if we can't decode to byte array
+
 export function fromHex(str) {
   if (typeof str !== 'string') throw new TypeError('Input is not a string')
-
-
-  // We don't use native Buffer impl, as rechecking input make it slower than pure js
-  // This path is used only on older engines though
+  if (str.length % 2 !== 0) throw new SyntaxError(E_HEX)

   if (!dehexArray) {
-    dehexArray = new
+    dehexArray = new Int8Array(mapSize).fill(-1) // no regex input validation here, so we map all other bytes to -1 and recheck sign
     for (let i = 0; i < 16; i++) {
       const s = i.toString(16)
       dehexArray[s.charCodeAt(0)] = dehexArray[s.toUpperCase().charCodeAt(0)] = i
@@ -73,16 +72,31 @@ export function fromHex(str) {
   if (nativeEncoder) {
     // Native encoder path is beneficial even for small arrays in Hermes
     const codes = nativeEncoder.encode(str)
-
-
-
-
+    if (codes.length !== str.length) throw new SyntaxError(E_HEX) // non-ascii
+    const last3 = length - 3 // Unroll nativeEncoder path as this is what modern Hermes takes and a small perf improvement is nice there
+    let i = 0
+    while (i < last3) {
+      const a = (dehexArray[codes[j++]] << 4) | dehexArray[codes[j++]]
+      const b = (dehexArray[codes[j++]] << 4) | dehexArray[codes[j++]]
+      const c = (dehexArray[codes[j++]] << 4) | dehexArray[codes[j++]]
+      const d = (dehexArray[codes[j++]] << 4) | dehexArray[codes[j++]]
+      if (a < 0 || b < 0 || c < 0 || d < 0) throw new SyntaxError(E_HEX)
+      arr[i++] = a
+      arr[i++] = b
+      arr[i++] = c
+      arr[i++] = d
+    }
+
+    while (i < length) {
+      const res = (dehexArray[codes[j++]] << 4) | dehexArray[codes[j++]]
+      if (res < 0) throw new SyntaxError(E_HEX)
+      arr[i++] = res
     }
   } else {
     for (let i = 0; i < length; i++) {
-      const
-      if (
-      arr[i] =
+      const res = (dehexArray[str.charCodeAt(j++)] << 4) | dehexArray[str.charCodeAt(j++)]
+      if (res < 0) throw new SyntaxError(E_HEX)
+      arr[i] = res
     }
   }

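
fromHex picks up the same validation approach as the other fallbacks: a -1-filled Int8Array lookup plus a sign check on each decoded byte, with an explicit even-length check up front. A brief sketch of the resulting behaviour, importing the fallback module directly (path assumed for illustration):

// Behaviour implied by the added checks in fallback/hex.js
import { fromHex } from './fallback/hex.js'

fromHex('deadBEEF') // Uint8Array [0xde, 0xad, 0xbe, 0xef] — both letter cases are mapped
fromHex('abc') // throws SyntaxError(E_HEX): odd length
fromHex('zz') // throws SyntaxError(E_HEX): 'z' maps to -1, so the decoded value is negative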