@exodus/bytes 1.0.0-rc.7 → 1.0.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,10 +1,150 @@
1
1
  # `@exodus/bytes`
2
2
 
3
- `Uint8Array` conversion to and from `base64`, `base32`, `hex` and `utf8`
3
+ `Uint8Array` conversion to and from `base64`, `base32`, `base58`, `hex`, `utf8`, `utf16`, `bech32` and `wif`
4
4
 
5
- [Fast](./Performance.md)
5
+ ## Strict
6
6
 
7
- Performs proper input validation
7
+ Performs proper input validation, ensuring no garbage-in, garbage-out behavior
8
+
9
+ Tested on Node.js, Deno, Bun, browsers (including Servo), Hermes, QuickJS and bare-bones engines in CI [(how?)](https://github.com/ExodusMovement/test#exodustest)
10
+
11
+ ## Fast
12
+
13
+ * `10-20x` faster than `Buffer` polyfill
14
+ * `2-10x` faster than `iconv-lite`
15
+
16
+ The numbers above are for the pure JS fallback
17
+
18
+ It's up to `100x` when a native implementation is available \
19
+ e.g. in `utf8fromString` on Hermes / React Native or `fromHex` in Chrome
20
+
21
+ Also:
22
+ * `3-8x` faster than `bs58`
23
+ * `10-30x` faster than `@scure/base` (or `>100x` on Node.js <25)
24
+ * Faster in `utf8toString` / `utf8fromString` than `Buffer` or `TextDecoder` / `TextEncoder` on Node.js
25
+
26
+ See [Performance](./Performance.md) for more info
27
+
28
+ ## TextEncoder / TextDecoder polyfill
29
+
30
+ ```js
31
+ import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding.js'
32
+ ```
33
+
34
+ Less than half the bundle size of [text-encoding](https://npmjs.com/text-encoding), [whatwg-encoding](https://npmjs.com/whatwg-encoding) or [iconv-lite](https://npmjs.com/iconv-lite) (gzipped or not), and [is much faster](#fast).
35
+
36
+ Spec-compliant: passes WPT (web-platform-tests) and is covered with extra tests.
37
+
38
+ Moreover, tests for this library uncovered [bugs in all major implementations](https://docs.google.com/spreadsheets/d/1pdEefRG6r9fZy61WHGz0TKSt8cO4ISWqlpBN5KntIvQ/edit).
39
+
40
+ [Faster than the native Node.js implementation, even on Node.js itself](https://github.com/nodejs/node/issues/61041#issuecomment-3649242024).
41
+
42
+ ### Caveat: `TextDecoder` / `TextEncoder` APIs are lossy by default per spec
43
+
44
+ _These are provided only as a compatibility layer; prefer the hardened APIs in new code._
45
+
46
+ * `TextDecoder` can (and should) be used with the `{ fatal: true }` option for any purpose demanding correctness / lossless transforms
47
+
48
+ * `TextEncoder` does not support a fatal mode per spec; it always performs replacement.
49
+
50
+ That is not suitable for hashing, cryptography or consensus applications.\
51
+ Otherwise there would be non-equal strings with equal signatures and hashes: the collision is caused by the lossy transform of a JS string to bytes.
52
+ Those strings also survive e.g. `JSON.stringify`/`JSON.parse` or being sent over the network.
53
+
54
+ Use the strict APIs in new applications; see `utf8fromString` / `utf16fromString` below.\
55
+ Those throw on non-well-formed strings by default.
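
To make the caveat concrete, here is a minimal sketch of the spec-mandated behavior, using the polyfill import shown above:

```js
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding.js'

// Lossless decoding: { fatal: true } throws a TypeError on malformed UTF-8
const strict = new TextDecoder('utf-8', { fatal: true })
// strict.decode(new Uint8Array([0xff])) // throws

// Default (lossy) mode silently substitutes U+FFFD per spec
new TextDecoder().decode(new Uint8Array([0xff])) // '\uFFFD'

// TextEncoder has no fatal mode: a lone surrogate is replaced, not rejected
new TextEncoder().encode('\uD800') // Uint8Array [0xef, 0xbf, 0xbd], the UTF-8 bytes of U+FFFD
```
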
56
+
57
+ ## API
58
+
59
+ ### `@exodus/bytes/utf8.js`
60
+
61
+ ##### `utf8fromString(str, format = 'uint8')`
62
+ ##### `utf8fromStringLoose(str, format = 'uint8')`
63
+ ##### `utf8toString(arr)`
64
+ ##### `utf8toStringLoose(arr)`
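
A usage sketch based on the signatures above; the exact container returned for `format = 'uint8'` is assumed here to be a `Uint8Array`:

```js
import { utf8fromString, utf8toString } from '@exodus/bytes/utf8.js'

const bytes = utf8fromString('héllo') // UTF-8 bytes, Uint8Array by default (format = 'uint8')
const text = utf8toString(bytes)      // 'héllo'

// Strict by default: a non-well-formed string (e.g. a lone surrogate) throws,
// while the Loose variants are assumed to perform replacement instead
// utf8fromString('\uD800')      // throws
// utf8fromStringLoose('\uD800') // lossy fallback (assumption)
```
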
65
+
66
+ ### `@exodus/bytes/utf16.js`
67
+
68
+ ##### `utf16fromString(str, format = 'uint16')`
69
+ ##### `utf16fromStringLoose(str, format = 'uint16')`
70
+ ##### `utf16toString(arr, 'uint16')`
71
+ ##### `utf16toStringLoose(arr, 'uint16')`
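
Similarly for UTF-16, assuming `format = 'uint16'` yields a `Uint16Array` of code units:

```js
import { utf16fromString, utf16toString } from '@exodus/bytes/utf16.js'

const units = utf16fromString('hi') // Uint16Array [0x68, 0x69] (container type is an assumption)
utf16toString(units)                // 'hi'
```
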
72
+
73
+ ### `@exodus/bytes/single-byte.js`
74
+
75
+ ##### `createSinglebyteDecoder(encoding, loose = false)`
76
+
77
+ Create a decoder for a supported one-byte `encoding`.
78
+
79
+ Returns a function `decode(arr)` that decodes bytes to a string.
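
For illustration, a sketch using `windows-1252`, which is listed as supported below:

```js
import { createSinglebyteDecoder } from '@exodus/bytes/single-byte.js'

const decode = createSinglebyteDecoder('windows-1252')
decode(new Uint8Array([0x48, 0x69, 0x20, 0x80])) // 'Hi €' (0x80 maps to '€' in windows-1252)
```
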
80
+
81
+ ### `@exodus/bytes/multi-byte.js`
82
+
83
+ ##### `createMultibyteDecoder(encoding, loose = false)`
84
+
85
+ Create a decoder for a supported legacy multi-byte `encoding`.
86
+
87
+ Returns a function `decode(arr, stream = false)` that decodes bytes to a string.
88
+
89
+ That function keeps internal decoding state between calls while `stream = true` is used.
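
A streaming sketch, assuming a WHATWG-style legacy label such as `shift_jis` is among the supported encodings:

```js
import { createMultibyteDecoder } from '@exodus/bytes/multi-byte.js'

const decode = createMultibyteDecoder('shift_jis')
// 'あ' is 0x82 0xA0 in Shift_JIS; feed it split across two chunks
decode(new Uint8Array([0x82]), true) // '' (the lead byte is buffered while stream = true)
decode(new Uint8Array([0xa0]))       // 'あ' (the final non-streaming call flushes the state)
```
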
90
+
91
+ ##### `windows1252toString(arr)`
92
+
93
+ Decode `windows-1252` bytes to a string.
94
+
95
+ Also supports `ascii` and `latin-1` as those are strict subsets of `windows-1252`.
96
+
97
+ There is no loose variant for this encoding; all bytes can be decoded.
98
+
99
+ Same as `windows1252toString = createSinglebyteDecoder('windows-1252')`.
100
+
101
+ ### `@exodus/bytes/hex.js`
102
+
103
+ ##### `toHex(arr)`
104
+ ##### `fromHex(string)`
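
A quick sketch of the hex round-trip:

```js
import { toHex, fromHex } from '@exodus/bytes/hex.js'

toHex(new Uint8Array([0xde, 0xad, 0xbe, 0xef])) // 'deadbeef'
fromHex('deadbeef')                             // Uint8Array [0xde, 0xad, 0xbe, 0xef]
```
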
105
+
106
+ ### `@exodus/bytes/base64.js`
107
+
108
+ ##### `toBase64(arr, { padding = true })`
109
+ ##### `toBase64url(arr, { padding = false })`
110
+ ##### `fromBase64(str, { format = 'uint8', padding = 'both' })`
111
+ ##### `fromBase64url(str, { format = 'uint8', padding = false })`
112
+ ##### `fromBase64any(str, { format = 'uint8', padding = 'both' })`
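
A sketch of the base64 helpers; `padding: 'both'` is read here as accepting both padded and unpadded input (assumption):

```js
import { toBase64, toBase64url, fromBase64 } from '@exodus/bytes/base64.js'

const bytes = new Uint8Array([0xfb, 0xff])
toBase64(bytes)                     // '+/8=' (padded by default)
toBase64(bytes, { padding: false }) // '+/8'
toBase64url(bytes)                  // '-_8'  (URL-safe alphabet, unpadded by default)
fromBase64('+/8=')                  // Uint8Array [0xfb, 0xff]
```
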
113
+
114
+ ### `@exodus/bytes/base32.js`
115
+
116
+ ##### `toBase32(arr, { padding = false })`
117
+ ##### `toBase32hex(arr, { padding = false })`
118
+ ##### `fromBase32(str, { format = 'uint8', padding = 'both' })`
119
+ ##### `fromBase32hex(str, { format = 'uint8', padding = 'both' })`
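
Likewise for base32, assuming the standard RFC 4648 alphabet:

```js
import { toBase32, fromBase32 } from '@exodus/bytes/base32.js'

toBase32(new Uint8Array([0xff]))                    // '74' (unpadded by default)
toBase32(new Uint8Array([0xff]), { padding: true }) // '74======'
fromBase32('74')                                    // Uint8Array [0xff]
```
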
120
+
121
+ ### `@exodus/bytes/bech32.js`
122
+
123
+ ##### `getPrefix(str, limit = 90)`
124
+ ##### `toBech32(prefix, bytes, limit = 90)`
125
+ ##### `fromBech32(str, limit = 90)`
126
+ ##### `toBech32m(prefix, bytes, limit = 90)`
127
+ ##### `fromBech32m(str, limit = 90)`
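
A sketch using the BIP-173 test vector for an empty payload with prefix `a`:

```js
import { toBech32, fromBech32, getPrefix } from '@exodus/bytes/bech32.js'

toBech32('a', new Uint8Array(0)) // 'a12uel5l' (prefix + '1' + 6-character checksum)
fromBech32('a12uel5l')           // { prefix: 'a', bytes: Uint8Array [] }
getPrefix('a12uel5l')            // 'a' (fast prefix extraction, skips checksum validation)
```
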
128
+
129
+ ### `@exodus/bytes/base58.js`
130
+
131
+ ##### `toBase58(arr)`
132
+ ##### `fromBase58(str, format = 'uint8')`
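
A base58 round-trip sketch (Bitcoin alphabet, where leading zero bytes encode as `1`):

```js
import { toBase58, fromBase58 } from '@exodus/bytes/base58.js'

toBase58(new Uint8Array([0, 0, 1])) // '112'
fromBase58('112')                   // Uint8Array [0, 0, 1]
```
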
133
+
134
+ ### `@exodus/bytes/base58check.js`
135
+
136
+ ##### `async toBase58check(arr)`
137
+ ##### `toBase58checkSync(arr)`
138
+ ##### `async fromBase58check(str, format = 'uint8')`
139
+ ##### `fromBase58checkSync(str, format = 'uint8')`
140
+ ##### `makeBase58check(hashAlgo, hashAlgoSync)`
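
A usage sketch; base58check appends a checksum derived from a double SHA-256 of the payload, and the payload below is an arbitrary example:

```js
import { toBase58check, fromBase58check } from '@exodus/bytes/base58check.js'

const payload = new Uint8Array(21) // e.g. a version byte plus a 20-byte hash (all zeros here)
const encoded = await toBase58check(payload)   // base58 of payload + double-SHA-256 checksum
const decoded = await fromBase58check(encoded) // verifies the checksum, returns the payload bytes
```
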
141
+
142
+ ### `@exodus/bytes/wif.js`
143
+
144
+ ##### `async fromWifString(string, version)`
145
+ ##### `fromWifStringSync(string, version)`
146
+ ##### `async toWifString({ version, privateKey, compressed })`
147
+ ##### `toWifStringSync({ version, privateKey, compressed })`
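
A WIF sketch; `0x80` is the Bitcoin mainnet WIF version byte, and the parsed return shape is an assumption:

```js
import { toWifString, fromWifString } from '@exodus/bytes/wif.js'

const privateKey = new Uint8Array(32).fill(1) // arbitrary example key
const wif = await toWifString({ version: 0x80, privateKey, compressed: true })
const parsed = await fromWifString(wif, 0x80) // assumed to return { version, privateKey, compressed }
```
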
8
148
 
9
149
  ## License
10
150
 
package/base58.js CHANGED
@@ -1,6 +1,6 @@
1
1
  import { typedView } from './array.js'
2
2
  import { assertUint8 } from './assert.js'
3
- import { nativeDecoder, nativeEncoder } from './fallback/_utils.js'
3
+ import { nativeDecoder, nativeEncoder, isHermes } from './fallback/_utils.js'
4
4
  import { encodeAscii, decodeAscii } from './fallback/latin1.js'
5
5
 
6
6
  const alphabet = [...'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz']
@@ -20,7 +20,7 @@ let fromMap
20
20
 
21
21
  const E_CHAR = 'Invalid character in base58 input'
22
22
 
23
- const shouldUseBigIntFrom = Boolean(globalThis.HermesInternal) // faster only on Hermes, numbers path beats it on normal engines
23
+ const shouldUseBigIntFrom = isHermes // faster only on Hermes, numbers path beats it on normal engines
24
24
 
25
25
  export function toBase58(arr) {
26
26
  assertUint8(arr)
@@ -134,7 +134,7 @@ export function fromBase58(str, format = 'uint8') {
134
134
  for (let i = 0; i < 58; i++) fromMap[alphabet[i].charCodeAt(0)] = i
135
135
  }
136
136
 
137
- const size = zeros + (((length - zeros) * 3 + 1) >> 2) // 3/4 rounded up, larger than ~0.73 coef to fit everything
137
+ const size = zeros + (((length - zeros + 1) * 3) >> 2) // 3/4 rounded up, larger than ~0.73 coef to fit everything
138
138
  const res = new Uint8Array(size)
139
139
  let at = size // where is the first significant byte written
140
140
 
package/base58check.js CHANGED
@@ -1,7 +1,7 @@
1
1
  import { typedView } from './array.js'
2
2
  import { assertUint8 } from './assert.js'
3
3
  import { toBase58, fromBase58 } from './base58.js'
4
- import { hashSync } from '@exodus/crypto/hash'
4
+ import { hashSync } from '@exodus/crypto/hash' // eslint-disable-line @exodus/import/no-deprecated
5
5
 
6
6
  // Note: while API is async, we use hashSync for now until we improve webcrypto perf for hash256
7
7
  // Inputs to base58 are typically very small, and that makes a difference
@@ -56,6 +56,7 @@ export const makeBase58check = (hashAlgo, hashAlgoSync) => {
56
56
  }
57
57
  }
58
58
 
59
+ // eslint-disable-next-line @exodus/import/no-deprecated
59
60
  const hash256sync = (x) => hashSync('sha256', hashSync('sha256', x, 'uint8'), 'uint8')
60
61
  const hash256 = hash256sync // See note at the top
61
62
  const {
package/base64.js CHANGED
@@ -1,5 +1,6 @@
1
1
  import { assertUint8, assertEmptyRest } from './assert.js'
2
2
  import { typedView } from './array.js'
3
+ import { isHermes, skipWeb } from './fallback/_utils.js'
3
4
  import { decodeLatin1, encodeLatin1 } from './fallback/latin1.js'
4
5
  import * as js from './fallback/base64.js'
5
6
 
@@ -15,8 +16,8 @@ const { toBase64: web64 } = Uint8Array.prototype // Modern engines have this
15
16
  const { E_CHAR, E_PADDING, E_LENGTH, E_LAST } = js
16
17
 
17
18
  // faster only on Hermes (and a little in old Chrome), js path beats it on normal engines
18
- const shouldUseBtoa = btoa && Boolean(globalThis.HermesInternal)
19
- const shouldUseAtob = atob && Boolean(globalThis.HermesInternal)
19
+ const shouldUseBtoa = btoa && isHermes
20
+ const shouldUseAtob = atob && isHermes
20
21
 
21
22
  // For native Buffer codepaths only
22
23
  const isBuffer = (x) => x.constructor === Buffer && Buffer.isBuffer(x)
@@ -33,8 +34,7 @@ function maybePad(res, padding) {
33
34
  }
34
35
 
35
36
  const toUrl = (x) => x.replaceAll('+', '-').replaceAll('/', '_')
36
- const fromUrl = (x) => x.replaceAll('-', '+').replaceAll('_', '/')
37
- const haveWeb = (x) => web64 && x.toBase64 === web64
37
+ const haveWeb = (x) => !skipWeb && web64 && x.toBase64 === web64
38
38
 
39
39
  export function toBase64(x, { padding = true } = {}) {
40
40
  assertUint8(x)
@@ -111,7 +111,7 @@ function noWhitespaceSeen(str, arr) {
111
111
  }
112
112
 
113
113
  let fromBase64impl
114
- if (Uint8Array.fromBase64) {
114
+ if (!skipWeb && Uint8Array.fromBase64) {
115
115
  // NOTICE: this is actually slower than our JS impl in older JavaScriptCore and (slightly) in SpiderMonkey, but faster on V8 and new JavaScriptCore
116
116
  fromBase64impl = (str, isBase64url, padding) => {
117
117
  const alphabet = isBase64url ? 'base64url' : 'base64'
@@ -137,40 +137,41 @@ if (Uint8Array.fromBase64) {
137
137
  if (!noWhitespaceSeen(str, arr)) throw new SyntaxError(E_CHAR)
138
138
  return arr
139
139
  }
140
- } else {
140
+ } else if (haveNativeBuffer) {
141
+ fromBase64impl = (str, isBase64url, padding) => {
142
+ const arr = Buffer.from(str, 'base64')
143
+ // Rechecking by re-encoding is cheaper than regexes on Node.js
144
+ const got = isBase64url ? maybeUnpad(str, padding === false) : maybePad(str, padding !== true)
145
+ const valid = isBase64url ? arr.base64urlSlice(0, arr.length) : arr.base64Slice(0, arr.length)
146
+ if (got !== valid) throw new SyntaxError(E_PADDING)
147
+ return arr // fully checked
148
+ }
149
+ } else if (shouldUseAtob) {
150
+ // atob is faster than manual parsing on Hermes
141
151
  fromBase64impl = (str, isBase64url, padding) => {
142
152
  let arr
143
- if (haveNativeBuffer) {
144
- arr = Buffer.from(str, 'base64')
145
- // Rechecking is cheaper than regexes on Node.js
146
- const r = isBase64url ? maybeUnpad(str, padding === false) : maybePad(str, padding !== true)
147
- if (r !== arr.toString(isBase64url ? 'base64url' : 'base64')) throw new SyntaxError(E_PADDING)
148
- } else if (shouldUseAtob) {
149
- // atob is faster than manual parsing on Hermes
150
- if (isBase64url) {
151
- if (/[\t\n\f\r +/]/.test(str)) throw new SyntaxError(E_CHAR) // atob verifies other invalid input
152
- str = fromUrl(str)
153
- }
154
-
155
- try {
156
- arr = encodeLatin1(atob(str))
157
- } catch {
158
- throw new SyntaxError(E_CHAR) // convert atob errors
159
- }
153
+ if (isBase64url) {
154
+ if (/[\t\n\f\r +/]/.test(str)) throw new SyntaxError(E_CHAR) // atob verifies other invalid input
155
+ str = str.replaceAll('-', '+').replaceAll('_', '/') // from url to normal
156
+ }
160
157
 
161
- if (!isBase64url && !noWhitespaceSeen(str, arr)) throw new SyntaxError(E_CHAR) // base64url checks input above
162
- } else {
163
- return js.fromBase64(str, isBase64url) // early return to skip last chunk verification, it's already validated in js
158
+ try {
159
+ arr = encodeLatin1(atob(str))
160
+ } catch {
161
+ throw new SyntaxError(E_CHAR) // convert atob errors
164
162
  }
165
163
 
164
+ if (!isBase64url && !noWhitespaceSeen(str, arr)) throw new SyntaxError(E_CHAR) // base64url checks input above
165
+
166
166
  if (arr.length % 3 !== 0) {
167
167
  // Check last chunk to be strict if it was incomplete
168
- const expected = toBase64(arr.subarray(-(arr.length % 3)))
168
+ const expected = toBase64(arr.subarray(-(arr.length % 3))) // str is normalized to non-url already
169
169
  const end = str.length % 4 === 0 ? str.slice(-4) : str.slice(-(str.length % 4)).padEnd(4, '=')
170
- const actual = isBase64url ? fromUrl(end) : end
171
- if (expected !== actual) throw new SyntaxError(E_LAST)
170
+ if (expected !== end) throw new SyntaxError(E_LAST)
172
171
  }
173
172
 
174
173
  return arr
175
174
  }
175
+ } else {
176
+ fromBase64impl = (str, isBase64url, padding) => js.fromBase64(str, isBase64url) // validated in js
176
177
  }
package/bech32.js ADDED
@@ -0,0 +1,254 @@
1
+ import { assertUint8 } from './assert.js'
2
+ import { nativeEncoder } from './fallback/_utils.js'
3
+ import { decodeAscii, encodeAscii, encodeLatin1 } from './fallback/latin1.js'
4
+
5
+ const alphabet = [...'qpzry9x8gf2tvdw0s3jn54khce6mua7l']
6
+ const BECH32 = 1
7
+ const BECH32M = 0x2b_c8_30_a3
8
+
9
+ const E_SIZE = 'Input length is out of range'
10
+ const E_PREFIX = 'Missing or invalid prefix'
11
+ const E_MIXED = 'Mixed-case string'
12
+ const E_PADDING = 'Padding is invalid'
13
+ const E_CHECKSUM = 'Invalid checksum'
14
+ const E_CHARACTER = 'Non-bech32 character'
15
+ const E_STRING = 'Input is not a string'
16
+
17
+ // nativeEncoder path uses encodeAscii which asserts ascii, otherwise we have 0-255 bytes from encodeLatin1
18
+ const c2x = new Int8Array(nativeEncoder ? 128 : 256).fill(-1)
19
+ const x2c = new Uint8Array(32)
20
+ for (let i = 0; i < alphabet.length; i++) {
21
+ const c = alphabet[i].charCodeAt(0)
22
+ c2x[c] = i
23
+ x2c[i] = c
24
+ }
25
+
26
+ // checksum size is 30 bits, 0x3f_ff_ff_ff
27
+ // The good thing about the checksum is that it's linear over every bit
28
+ const poly0 = new Uint32Array(32) // just precache all possible ones, it's only 1 KiB
29
+ const p = (x) => ((x & 0x1_ff_ff_ff) << 5) ^ poly0[x >> 25]
30
+ for (let i = 0; i < 32; i++) {
31
+ poly0[i] =
32
+ (i & 0b0_0001 ? 0x3b_6a_57_b2 : 0) ^
33
+ (i & 0b0_0010 ? 0x26_50_8e_6d : 0) ^
34
+ (i & 0b0_0100 ? 0x1e_a1_19_fa : 0) ^
35
+ (i & 0b0_1000 ? 0x3d_42_33_dd : 0) ^
36
+ (i & 0b1_0000 ? 0x2a_14_62_b3 : 0)
37
+ }
38
+
39
+ // 7 KiB more for faster p6/p8
40
+ const poly1 = new Uint32Array(32)
41
+ const poly2 = new Uint32Array(32)
42
+ const poly3 = new Uint32Array(32)
43
+ const poly4 = new Uint32Array(32)
44
+ const poly5 = new Uint32Array(32)
45
+ const poly6 = new Uint32Array(32)
46
+ const poly7 = new Uint32Array(32)
47
+ for (let i = 0; i < 32; i++) {
48
+ // poly0[i] === p(p(p(p(p(p(i))))))
49
+ poly1[i] = p(poly0[i]) // aka p(p(p(p(p(p(i << 5))))))
50
+ poly2[i] = p(poly1[i]) // aka p(p(p(p(p(p(i << 10))))))
51
+ poly3[i] = p(poly2[i]) // aka p(p(p(p(p(p(i << 15))))))
52
+ poly4[i] = p(poly3[i]) // aka p(p(p(p(p(p(i << 20))))))
53
+ poly5[i] = p(poly4[i]) // aka p(p(p(p(p(p(i << 25))))))
54
+ poly6[i] = p(poly5[i])
55
+ poly7[i] = p(poly6[i])
56
+ }
57
+
58
+ function p6(x) {
59
+ // Same as: return p(p(p(p(p(p(x))))))
60
+ const x0 = x & 0x1f
61
+ const x1 = (x >> 5) & 0x1f
62
+ const x2 = (x >> 10) & 0x1f
63
+ const x3 = (x >> 15) & 0x1f
64
+ const x4 = (x >> 20) & 0x1f
65
+ const x5 = (x >> 25) & 0x1f
66
+ return poly0[x0] ^ poly1[x1] ^ poly2[x2] ^ poly3[x3] ^ poly4[x4] ^ poly5[x5]
67
+ }
68
+
69
+ function p8(x) {
70
+ // Same as: return p(p(p(p(p(p(p(p(x))))))))
71
+ const x0 = x & 0x1f
72
+ const x1 = (x >> 5) & 0x1f
73
+ const x2 = (x >> 10) & 0x1f
74
+ const x3 = (x >> 15) & 0x1f
75
+ const x4 = (x >> 20) & 0x1f
76
+ const x5 = (x >> 25) & 0x1f
77
+ return poly2[x0] ^ poly3[x1] ^ poly4[x2] ^ poly5[x3] ^ poly6[x4] ^ poly7[x5]
78
+ }
79
+
80
+ // p(p(p(p(p(p(chk) ^ x0) ^ x1) ^ x2) ^ x3) ^ x4) ^ x5 === p6(chk) ^ merge(x0, x1, x2, x3, x4, x5)
81
+ const merge = (a, b, c, d, e, f) => f ^ (e << 5) ^ (d << 10) ^ (c << 15) ^ (b << 20) ^ (a << 25)
82
+
83
+ const prefixCache = new Map() // Cache 10 of them
84
+
85
+ function pPrefix(prefix) {
86
+ if (prefix === 'bc') return 0x2_31_80_43 // perf
87
+ const cached = prefixCache.get(prefix)
88
+ if (cached !== undefined) return cached
89
+
90
+ // bech32_hrp_expand(s): [ord(x) >> 5 for x in s] + [0] + [ord(x) & 31 for x in s]
91
+ // We can do this in a single scan due to linearity, but it's not very beneficial
92
+ let chk = 1 // it starts with one (see def bech32_polymod in BIP_0173)
93
+ const length = prefix.length
94
+ for (let i = 0; i < length; i++) {
95
+ const c = prefix.charCodeAt(i)
96
+ if (c < 33 || c > 126) throw new Error(E_PREFIX) // each character having a value in the range [33-126]
97
+ chk = p(chk) ^ (c >> 5)
98
+ }
99
+
100
+ chk = p(chk) // <= for + [0]
101
+ for (let i = 0; i < length; i++) {
102
+ const c = prefix.charCodeAt(i)
103
+ chk = p(chk) ^ (c & 0x1f)
104
+ }
105
+
106
+ if (prefixCache.size < 10) prefixCache.set(prefix, chk)
107
+ return chk
108
+ }
109
+
110
+ function toBech32enc(prefix, bytes, limit, encoding) {
111
+ if (typeof prefix !== 'string' || !prefix) throw new TypeError(E_PREFIX)
112
+ if (typeof limit !== 'number') throw new TypeError(E_SIZE)
113
+ assertUint8(bytes)
114
+ const bytesLength = bytes.length
115
+ const wordsLength = Math.ceil((bytesLength * 8) / 5)
116
+ if (!(prefix.length + 7 + wordsLength <= limit)) throw new TypeError(E_SIZE)
117
+ prefix = prefix.toLowerCase()
118
+ const out = new Uint8Array(wordsLength + 6)
119
+
120
+ let chk = pPrefix(prefix)
121
+ let i = 0, j = 0 // prettier-ignore
122
+
123
+ // This loop is just an optimization of the next one
124
+ for (const length4 = bytesLength - 4; i < length4; i += 5, j += 8) {
125
+ const b0 = bytes[i], b1 = bytes[i + 1], b2 = bytes[i + 2], b3 = bytes[i + 3], b4 = bytes[i + 4] // prettier-ignore
126
+ const x0 = b0 >> 3
127
+ const x1 = ((b0 << 2) & 0x1f) | (b1 >> 6)
128
+ const x2 = (b1 >> 1) & 0x1f
129
+ const x3 = ((b1 << 4) & 0x1f) | (b2 >> 4)
130
+ const x4 = ((b2 << 1) & 0x1f) | (b3 >> 7)
131
+ const x5 = (b3 >> 2) & 0x1f
132
+ const x6 = ((b3 << 3) & 0x1f) | (b4 >> 5)
133
+ const x7 = b4 & 0x1f
134
+ chk = merge(x2, x3, x4, x5, x6, x7) ^ poly0[x1] ^ poly1[x0] ^ p8(chk)
135
+ out[j] = x2c[x0]
136
+ out[j + 1] = x2c[x1]
137
+ out[j + 2] = x2c[x2]
138
+ out[j + 3] = x2c[x3]
139
+ out[j + 4] = x2c[x4]
140
+ out[j + 5] = x2c[x5]
141
+ out[j + 6] = x2c[x6]
142
+ out[j + 7] = x2c[x7]
143
+ }
144
+
145
+ let value = 0, bits = 0 // prettier-ignore
146
+ for (; i < bytesLength; i++) {
147
+ value = ((value & 0xf) << 8) | bytes[i]
148
+ bits += 3
149
+ const x = (value >> bits) & 0x1f
150
+ chk = p(chk) ^ x
151
+ out[j++] = x2c[x]
152
+ if (bits >= 5) {
153
+ bits -= 5
154
+ const x = (value >> bits) & 0x1f
155
+ chk = p(chk) ^ x
156
+ out[j++] = x2c[x]
157
+ }
158
+ }
159
+
160
+ if (bits > 0) {
161
+ const x = (value << (5 - bits)) & 0x1f
162
+ chk = p(chk) ^ x
163
+ out[j++] = x2c[x]
164
+ }
165
+
166
+ chk = encoding ^ p6(chk)
167
+ out[j++] = x2c[(chk >> 25) & 0x1f]
168
+ out[j++] = x2c[(chk >> 20) & 0x1f]
169
+ out[j++] = x2c[(chk >> 15) & 0x1f]
170
+ out[j++] = x2c[(chk >> 10) & 0x1f]
171
+ out[j++] = x2c[(chk >> 5) & 0x1f]
172
+ out[j++] = x2c[(chk >> 0) & 0x1f]
173
+
174
+ return prefix + '1' + decodeAscii(out) // suboptimal on bare-bones engines, but fine on Hermes, so not worth optimizing for now
175
+ }
176
+
177
+ function assertDecodeArgs(str, limit) {
178
+ if (typeof str !== 'string') throw new TypeError(E_STRING)
179
+ if (typeof limit !== 'number' || str.length < 8 || !(str.length <= limit)) throw new Error(E_SIZE)
180
+ }
181
+
182
+ function fromBech32enc(str, limit, encoding) {
183
+ assertDecodeArgs(str, limit)
184
+ const lower = str.toLowerCase()
185
+ if (str !== lower) {
186
+ if (str !== str.toUpperCase()) throw new Error(E_MIXED)
187
+ str = lower
188
+ }
189
+
190
+ const split = str.lastIndexOf('1')
191
+ if (split <= 0) throw new Error(E_PREFIX)
192
+ const prefix = str.slice(0, split)
193
+ const charsLength = str.length - split - 1
194
+ const wordsLength = charsLength - 6
195
+ if (wordsLength < 0) throw new Error(E_SIZE)
196
+ const bytesLength = (wordsLength * 5) >> 3
197
+ const slice = str.slice(split + 1)
198
+ const c = nativeEncoder ? encodeAscii(slice, E_CHARACTER) : encodeLatin1(slice) // suboptimal, but only affects non-Hermes bare-bones engines
199
+ const bytes = new Uint8Array(bytesLength)
200
+
201
+ let chk = pPrefix(prefix)
202
+ let i = 0, j = 0 // prettier-ignore
203
+
204
+ // This loop is just an optimization of the next one
205
+ for (const length7 = wordsLength - 7; i < length7; i += 8, j += 5) {
206
+ const c0 = c[i], c1 = c[i + 1], c2 = c[i + 2], c3 = c[i + 3], c4 = c[i + 4], c5 = c[i + 5], c6 = c[i + 6], c7 = c[i + 7] // prettier-ignore
207
+ const x0 = c2x[c0], x1 = c2x[c1], x2 = c2x[c2], x3 = c2x[c3], x4 = c2x[c4], x5 = c2x[c5], x6 = c2x[c6], x7 = c2x[c7] // prettier-ignore
208
+ if (x0 < 0 || x1 < 0 || x2 < 0 || x3 < 0 || x4 < 0 || x5 < 0 || x6 < 0 || x7 < 0) throw new SyntaxError(E_CHARACTER) // prettier-ignore
209
+ chk = merge(x2, x3, x4, x5, x6, x7) ^ poly0[x1] ^ poly1[x0] ^ p8(chk)
210
+ bytes[j] = (x0 << 3) | (x1 >> 2)
211
+ bytes[j + 1] = (((x1 << 6) | (x2 << 1)) & 0xff) | (x3 >> 4)
212
+ bytes[j + 2] = ((x3 << 4) & 0xff) | (x4 >> 1)
213
+ bytes[j + 3] = ((((x4 << 5) | x5) << 2) & 0xff) | (x6 >> 3)
214
+ bytes[j + 4] = ((x6 << 5) & 0xff) | x7
215
+ }
216
+
217
+ let value = 0, bits = 0 // prettier-ignore
218
+ for (; i < wordsLength; i++) {
219
+ const x = c2x[c[i]]
220
+ if (x < 0) throw new SyntaxError(E_CHARACTER)
221
+ chk = p(chk) ^ x
222
+ value = (value << 5) | x
223
+ bits += 5
224
+ if (bits >= 8) {
225
+ bits -= 8
226
+ bytes[j++] = (value >> bits) & 0xff
227
+ }
228
+ }
229
+
230
+ if (bits >= 5 || (value << (8 - bits)) & 0xff) throw new Error(E_PADDING)
231
+
232
+ // Checksum
233
+ {
234
+ const c0 = c[i], c1 = c[i + 1], c2 = c[i + 2], c3 = c[i + 3], c4 = c[i + 4], c5 = c[i + 5] // prettier-ignore
235
+ const x0 = c2x[c0], x1 = c2x[c1], x2 = c2x[c2], x3 = c2x[c3], x4 = c2x[c4], x5 = c2x[c5] // prettier-ignore
236
+ if (x0 < 0 || x1 < 0 || x2 < 0 || x3 < 0 || x4 < 0 || x5 < 0) throw new SyntaxError(E_CHARACTER)
237
+ if ((merge(x0, x1, x2, x3, x4, x5) ^ p6(chk)) !== encoding) throw new Error(E_CHECKSUM)
238
+ }
239
+
240
+ return { prefix, bytes }
241
+ }
242
+
243
+ // This is designed to be a very quick check, skipping all other validation
244
+ export function getPrefix(str, limit = 90) {
245
+ assertDecodeArgs(str, limit)
246
+ const split = str.lastIndexOf('1')
247
+ if (split <= 0) throw new Error(E_PREFIX)
248
+ return str.slice(0, split).toLowerCase()
249
+ }
250
+
251
+ export const toBech32 = (prefix, bytes, limit = 90) => toBech32enc(prefix, bytes, limit, BECH32)
252
+ export const fromBech32 = (str, limit = 90) => fromBech32enc(str, limit, BECH32)
253
+ export const toBech32m = (prefix, bytes, limit = 90) => toBech32enc(prefix, bytes, limit, BECH32M)
254
+ export const fromBech32m = (str, limit = 90) => fromBech32enc(str, limit, BECH32M)