envio 2.9.1 → 2.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/evm.schema.json +103 -92
- package/package.json +5 -5
- package/src/Internal.gen.ts +47 -0
- package/src/Internal.res +124 -0
- package/src/LazyLoader.res +134 -0
- package/src/ReorgDetection.res +432 -0
- package/src/TopicFilter.res +27 -0
- package/src/Utils.res +26 -0
- package/src/bindings/BigInt.res +15 -3
- package/src/bindings/Ethers.gen.ts +14 -0
- package/src/bindings/Ethers.res +259 -0
- package/src/bindings/SDSL.res +12 -0
- package/src/sources/HyperSyncJsonApi.res +376 -0
- /package/src/{bindings → sources}/HyperSyncClient.res +0 -0
package/src/ReorgDetection.res
ADDED

@@ -0,0 +1,432 @@
+type blockNumberAndHash = {
+  //Block hash is used for actual comparison to test for reorg
+  blockHash: string,
+  blockNumber: int,
+}
+
+type blockData = {
+  ...blockNumberAndHash,
+  //Timestamp is needed for multichain to action reorgs across chains from given blocks to
+  //ensure ordering is kept constant
+  blockTimestamp: int,
+}
+
+module LastBlockScannedHashes: {
+  type t
+  /**Instantiate t with existing data*/
+  let makeWithData: (array<blockData>, ~confirmedBlockThreshold: int) => t
+
+  /**Instantiate empty t with no block data*/
+  let empty: (~confirmedBlockThreshold: int) => t
+
+  /**Add the latest scanned block data to t*/
+  let addLatestLastBlockData: (t, ~lastBlockScannedData: blockData) => t
+
+  /**Read the latest last block scanned data from the front of the queue*/
+  let getLatestLastBlockData: t => option<blockData>
+  /** Given the head block number, find the earliest timestamp from the data where the data
+  is still within the given block threshold from the head
+  */
+  let getEarlistTimestampInThreshold: (~currentHeight: int, t) => option<int>
+
+  /**
+  Prunes the back of the unneeded data on the queue.
+
+  In the case of a multichain indexer, pass in the earliest needed timestamp that
+  occurs within the chains threshold. Ensure that we keep track of one range before that
+  as this could be the target range block for a reorg
+  */
+  let pruneStaleBlockData: (
+    ~currentHeight: int,
+    ~earliestMultiChainTimestampInThreshold: int=?,
+    t,
+  ) => t
+
+  /**
+  Return a BlockNumbersAndHashes.t rolled back to where hashes
+  match the provided blockNumberAndHashes
+  */
+  let rollBackToValidHash: (t, ~blockNumbersAndHashes: array<blockData>) => result<t, exn>
+
+  /**
+  A record that holds the current height of a chain and the lastBlockScannedHashes,
+  used for passing into getEarliestMultiChainTimestampInThreshold where these values
+  need to be zipped
+  */
+  type currentHeightAndLastBlockHashes = {
+    currentHeight: int,
+    lastBlockScannedHashes: t,
+  }
+
+  /**
+  Finds the earliest timestamp that is within the confirmedBlockThreshold of
+  each chain in a multi-chain indexer. Returns None if it's a single chain or if
+  the list is empty
+  */
+  let getEarliestMultiChainTimestampInThreshold: array<currentHeightAndLastBlockHashes> => option<
+    int,
+  >
+
+  let getAllBlockNumbers: t => Belt.Array.t<int>
+
+  let hasReorgOccurred: (t, ~firstBlockParentNumberAndHash: option<blockNumberAndHash>) => bool
+
+  /**
+  Return a BlockNumbersAndHashes.t rolled back to where blockData is less
+  than the provided blockNumber
+  */
+  let rollBackToBlockNumberLt: (~blockNumber: int, t) => t
+} = {
+  type t = {
+    // Number of blocks behind head, we want to keep track
+    // as a threshold for reorgs. If for eg. this is 200,
+    // it means we are accounting for reorgs up to 200 blocks
+    // behind the head
+    confirmedBlockThreshold: int,
+    // A cached list of recent blockdata to make comparison checks
+    // for reorgs. Should be quite short data set
+    // so using built in array for data structure.
+    lastBlockScannedDataList: list<blockData>,
+  }
+
+  //Instantiates LastBlockHashes.t
+  let makeWithDataInternal = (lastBlockScannedDataList, ~confirmedBlockThreshold) => {
+    confirmedBlockThreshold,
+    lastBlockScannedDataList,
+  }
+
+  let makeWithData = (lastBlockScannedDataListArr, ~confirmedBlockThreshold) =>
+    lastBlockScannedDataListArr
+    ->Belt.List.fromArray
+    ->Belt.List.reverse
+    ->makeWithDataInternal(~confirmedBlockThreshold)
+  //Instantiates empty LastBlockHashes
+  let empty = (~confirmedBlockThreshold) => makeWithDataInternal(list{}, ~confirmedBlockThreshold)
+
+  /** Given the head block number, find the earliest timestamp from the data where the data
+  is still within the given block threshold from the head
+  */
+  let rec getEarlistTimestampInThresholdInternal = (
+    // The current block number at the head of the chain
+    ~currentHeight,
+    ~confirmedBlockThreshold,
+    //reversed so that head to tail is earliest to latest
+    reversedLastBlockDataList: list<blockData>,
+  ): option<int> => {
+    switch reversedLastBlockDataList {
+    | list{lastBlockScannedData, ...tail} =>
+      // If the blocknumber is not in the threshold recurse with given blockdata's
+      // timestamp, incrementing the from index
+      if lastBlockScannedData.blockNumber >= currentHeight - confirmedBlockThreshold {
+        // If it's in the threshold return the last earliest timestamp
+        Some(lastBlockScannedData.blockTimestamp)
+      } else {
+        tail->getEarlistTimestampInThresholdInternal(~currentHeight, ~confirmedBlockThreshold)
+      }
+    | list{} => None
+    }
+  }
+
+  let getEarlistTimestampInThreshold = (
+    ~currentHeight,
+    {lastBlockScannedDataList, confirmedBlockThreshold}: t,
+  ) =>
+    lastBlockScannedDataList
+    ->Belt.List.reverse
+    ->getEarlistTimestampInThresholdInternal(~currentHeight, ~confirmedBlockThreshold)
+
+  /**
+  Inserts last scanned blockData in its positional order of blockNumber. Adds would usually
+  be always appending to the head with a new last scanned blockData but could be earlier in the
+  case of a dynamic contract.
+  */
+  let rec addLatestLastBlockDataInternal = (
+    ~lastBlockScannedData,
+    //Default empty, accumRev would be each item part of lastBlockScannedDataList that has
+    //a higher blockNumber than lastBlockScannedData
+    ~accumRev=list{},
+    lastBlockScannedDataList,
+  ) => {
+    switch lastBlockScannedDataList {
+    | list{head, ...tail} =>
+      if head.blockNumber <= lastBlockScannedData.blockNumber {
+        Belt.List.reverseConcat(accumRev, list{lastBlockScannedData, ...lastBlockScannedDataList})
+      } else {
+        tail->addLatestLastBlockDataInternal(
+          ~lastBlockScannedData,
+          ~accumRev=list{head, ...accumRev},
+        )
+      }
+    | list{} => Belt.List.reverseConcat(accumRev, list{lastBlockScannedData})
+    }
+  }
+
+  // Adds the latest blockData to the head of the list
+  let addLatestLastBlockData = (
+    {confirmedBlockThreshold, lastBlockScannedDataList}: t,
+    ~lastBlockScannedData,
+  ) =>
+    lastBlockScannedDataList
+    ->addLatestLastBlockDataInternal(~lastBlockScannedData)
+    ->makeWithDataInternal(~confirmedBlockThreshold)
+
+  let getLatestLastBlockData = (self: t) => self.lastBlockScannedDataList->Belt.List.head
+
+  let blockDataIsPastThreshold = (
+    lastBlockScannedData: blockData,
+    ~currentHeight: int,
+    ~confirmedBlockThreshold: int,
+  ) => lastBlockScannedData.blockNumber < currentHeight - confirmedBlockThreshold
+
+  type rec trampoline<'a> = Data('a) | Callback(unit => trampoline<'a>)
+
+  /**
+  Trampolines are a method of handling mutual recursions without the risk of hitting stack limits
+
+  Tail Call Optimization is not possible on mutually recursive functions and so this is a manual optimization
+
+  (note: this implementation of "trampoline" uses a tail call and so TCO transforms it to a while loop in JS)
+  */
+  let rec trampoline = value =>
+    switch value {
+    | Data(v) => v
+    | Callback(fn) => fn()->trampoline
+    }
+
+  //Prunes the back of the unneeded data on the queue
+  let rec pruneStaleBlockDataInternal = (
+    ~currentHeight,
+    ~earliestMultiChainTimestampInThreshold,
+    ~confirmedBlockThreshold,
+    lastBlockScannedDataListReversed: list<blockData>,
+  ) => {
+    switch earliestMultiChainTimestampInThreshold {
+    // If there is no "earliest multichain timestamp in threshold"
+    // simply prune the earliest block in the case that the block is
+    // outside of the confirmedBlockThreshold
+    | None =>
+      Callback(
+        () =>
+          lastBlockScannedDataListReversed->pruneEarliestBlockData(
+            ~currentHeight,
+            ~earliestMultiChainTimestampInThreshold,
+            ~confirmedBlockThreshold,
+          ),
+      )
+    | Some(timestampThresholdNeeded) =>
+      switch lastBlockScannedDataListReversed {
+      | list{_head, second, ..._tail} =>
+        // Only prune in the case where the second lastBlockScannedData from the back
+        // has an earlier timestamp than the timestampThresholdNeeded (this is
+        // the earliest timestamp across all chains where the lastBlockScannedData is
+        // still within the confirmedBlockThreshold)
+        if second.blockTimestamp < timestampThresholdNeeded {
+          Callback(
+            () =>
+              lastBlockScannedDataListReversed->pruneEarliestBlockData(
+                ~currentHeight,
+                ~earliestMultiChainTimestampInThreshold,
+                ~confirmedBlockThreshold,
+              ),
+          )
+        } else {
+          Data(lastBlockScannedDataListReversed)
+        }
+      | list{_} | list{} => Data(lastBlockScannedDataListReversed)
+      }
+    }
+  }
+  and pruneEarliestBlockData = (
+    lastBlockScannedDataListReversed: list<blockData>,
+    ~currentHeight,
+    ~earliestMultiChainTimestampInThreshold,
+    ~confirmedBlockThreshold,
+  ) => {
+    switch lastBlockScannedDataListReversed {
+    | list{earliestLastBlockData, ...tail} =>
+      // In the case that back is past the threshold, remove it and
+      // recurse
+      if earliestLastBlockData->blockDataIsPastThreshold(~currentHeight, ~confirmedBlockThreshold) {
+        // Recurse to check the next item
+        Callback(
+          () =>
+            tail->pruneStaleBlockDataInternal(
+              ~currentHeight,
+              ~earliestMultiChainTimestampInThreshold,
+              ~confirmedBlockThreshold,
+            ),
+        )
+      } else {
+        Data(lastBlockScannedDataListReversed)
+      }
+    | list{} => Data(list{})
+    }
+  }
+
+  //Prunes the back of the unneeded data on the queue
+  let pruneStaleBlockData = (
+    ~currentHeight,
+    ~earliestMultiChainTimestampInThreshold=?,
+    {confirmedBlockThreshold, lastBlockScannedDataList}: t,
+  ) => {
+    trampoline(
+      lastBlockScannedDataList
+      ->Belt.List.reverse
+      ->pruneStaleBlockDataInternal(
+        ~confirmedBlockThreshold,
+        ~currentHeight,
+        ~earliestMultiChainTimestampInThreshold,
+      ),
+    )
+    ->Belt.List.reverse
+    ->makeWithDataInternal(~confirmedBlockThreshold)
+  }
+
+  type blockNumberToHashMap = Belt.Map.Int.t<string>
+  exception BlockNotIncludedInMap(int)
+
+  let doBlockHashesMatch = (lastBlockScannedData, ~latestBlockHashes: blockNumberToHashMap) => {
+    let {blockNumber, blockHash} = lastBlockScannedData
+    let matchingBlock = latestBlockHashes->Belt.Map.Int.get(blockNumber)
+
+    switch matchingBlock {
+    | None => Error(BlockNotIncludedInMap(blockNumber))
+    | Some(latestBlockHash) => Ok(blockHash == latestBlockHash)
+    }
+  }
+
+  let rec rollBackToValidHashInternal = (
+    latestBlockScannedData: list<blockData>,
+    ~latestBlockHashes: blockNumberToHashMap,
+  ) => {
+    switch latestBlockScannedData {
+    | list{} => Ok(list{}) //Nothing on the front to rollback to
+    | list{lastBlockScannedData, ...tail} =>
+      lastBlockScannedData
+      ->doBlockHashesMatch(~latestBlockHashes)
+      ->Belt.Result.flatMap(blockHashesDoMatch => {
+        if blockHashesDoMatch {
+          Ok(list{lastBlockScannedData, ...tail})
+        } else {
+          tail->rollBackToValidHashInternal(~latestBlockHashes)
+        }
+      })
+    }
+  }
+
+  /**
+  Return a BlockNumbersAndHashes.t rolled back to where hashes
+  match the provided blockNumberAndHashes
+  */
+  let rollBackToValidHash = (self: t, ~blockNumbersAndHashes: array<blockData>) => {
+    let {confirmedBlockThreshold, lastBlockScannedDataList} = self
+    let latestBlockHashes =
+      blockNumbersAndHashes
+      ->Belt.Array.map(({blockNumber, blockHash}) => (blockNumber, blockHash))
+      ->Belt.Map.Int.fromArray
+
+    lastBlockScannedDataList
+    ->rollBackToValidHashInternal(~latestBlockHashes)
+    ->Belt.Result.map(list => list->makeWithDataInternal(~confirmedBlockThreshold))
+  }
+
+  let min = (arrInt: array<int>) => {
+    arrInt->Belt.Array.reduce(None, (current, val) => {
+      switch current {
+      | None => Some(val)
+      | Some(current) => Js.Math.min_int(current, val)->Some
+      }
+    })
+  }
+
+  let rec rollBackToBlockNumberLtInternal = (
+    ~blockNumber: int,
+    latestBlockScannedData: list<blockData>,
+  ) => {
+    switch latestBlockScannedData {
+    | list{} => list{}
+    | list{head, ...tail} =>
+      if head.blockNumber < blockNumber {
+        latestBlockScannedData
+      } else {
+        tail->rollBackToBlockNumberLtInternal(~blockNumber)
+      }
+    }
+  }
+
+  /**
+  Return a BlockNumbersAndHashes.t rolled back to where blockData is less
+  than the provided blockNumber
+  */
+  let rollBackToBlockNumberLt = (~blockNumber: int, self: t) => {
+    let {confirmedBlockThreshold, lastBlockScannedDataList} = self
+    lastBlockScannedDataList
+    ->rollBackToBlockNumberLtInternal(~blockNumber)
+    ->makeWithDataInternal(~confirmedBlockThreshold)
+  }
+
+  type currentHeightAndLastBlockHashes = {
+    currentHeight: int,
+    lastBlockScannedHashes: t,
+  }
+
+  /**
+  Find the earliest block time across multiple instances of self where the block timestamp
+  falls within its own confirmed block threshold
+
+  Return None if there is only one chain (since we don't want to take this val into account for a
+  single chain indexer) or if there are no chains (should never be the case)
+  */
+  let getEarliestMultiChainTimestampInThreshold = (
+    multiSelf: array<currentHeightAndLastBlockHashes>,
+  ) => {
+    switch multiSelf {
+    | [_singleVal] =>
+      //In the case where there is only one chain, return none as there would be no need to aggregate
+      //or keep track of the lowest timestamp. The chain can purge as far back as its confirmed block range
+      None
+    | multiSelf =>
+      multiSelf
+      ->Belt.Array.keepMap(({currentHeight, lastBlockScannedHashes}) => {
+        lastBlockScannedHashes->getEarlistTimestampInThreshold(~currentHeight)
+      })
+      ->min
+    }
+  }
+
+  let getAllBlockNumbers = (self: t) =>
+    self.lastBlockScannedDataList->Belt.List.reduceReverse([], (acc, v) => {
+      Belt.Array.concat(acc, [v.blockNumber])
+    })
+
+  /**
+  Checks whether a reorg has occurred by comparing the parent hash with the last saved block hash.
+  */
+  let rec hasReorgOccurredInternal = (
+    lastBlockScannedDataList,
+    ~firstBlockParentNumberAndHash: option<blockNumberAndHash>,
+  ) => {
+    switch (firstBlockParentNumberAndHash, lastBlockScannedDataList) {
+    | (Some({blockHash: parentHash, blockNumber: parentBlockNumber}), list{head, ...tail}) =>
+      if parentBlockNumber == head.blockNumber {
+        parentHash != head.blockHash
+      } else {
+        //if block numbers do not match, this is a dynamic contract case and should recurse
+        //through the list to look for a matching block or nothing to validate
+        tail->hasReorgOccurredInternal(~firstBlockParentNumberAndHash)
+      }
+    | _ => //If parentHash is None, either it's the genesis block (no reorg)
+      //Or it's already confirmed, so no reorg
+      //If recentLastBlockData is None, we have not yet saved blockData to compare against
+      false
+    }
+  }
+
+  let hasReorgOccurred = (
+    lastBlockScannedHashes: t,
+    ~firstBlockParentNumberAndHash: option<blockNumberAndHash>,
+  ) =>
+    lastBlockScannedHashes.lastBlockScannedDataList->hasReorgOccurredInternal(
+      ~firstBlockParentNumberAndHash,
+    )
+}
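
Taken together, the `LastBlockScannedHashes` interface above supports the reorg workflow: record each scanned block, compare a newly fetched block's parent hash against the stored hash, and roll back to the last block whose hash still matches the chain. The following is a minimal usage sketch against the exported signatures only; the module qualifier, block numbers, hashes, and timestamps are illustrative assumptions, not code from the package.

```rescript
// Hypothetical usage sketch of the interface above (all values are made up).
open ReorgDetection

// Keep hashes for blocks up to 200 blocks behind the head.
let tracked =
  LastBlockScannedHashes.empty(~confirmedBlockThreshold=200)
  ->LastBlockScannedHashes.addLatestLastBlockData(
    ~lastBlockScannedData={blockNumber: 100, blockHash: "0xaaa", blockTimestamp: 1_700_000_000},
  )
  ->LastBlockScannedHashes.addLatestLastBlockData(
    ~lastBlockScannedData={blockNumber: 101, blockHash: "0xbbb", blockTimestamp: 1_700_000_012},
  )

// Suppose the next fetched block (102) reports a parent hash that differs from
// the hash stored for block 101 — hasReorgOccurred returns true.
let reorged =
  tracked->LastBlockScannedHashes.hasReorgOccurred(
    ~firstBlockParentNumberAndHash=Some({blockNumber: 101, blockHash: "0xccc"}),
  )
Js.log2("reorg detected:", reorged)

// On a reorg, re-fetch hashes for the tracked block numbers and drop entries
// until the stored hash matches the chain again (block 101 is dropped here).
let rolledBack =
  tracked->LastBlockScannedHashes.rollBackToValidHash(
    ~blockNumbersAndHashes=[
      {blockNumber: 100, blockHash: "0xaaa", blockTimestamp: 1_700_000_000},
      {blockNumber: 101, blockHash: "0xddd", blockTimestamp: 1_700_000_012},
    ],
  )
Js.log(rolledBack)
```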
package/src/TopicFilter.res
ADDED

@@ -0,0 +1,27 @@
+let toTwosComplement = (num: bigint, ~bytesLen: int) => {
+  let maxValue = 1n->BigInt.Bitwise.shift_left(BigInt.fromInt(bytesLen * 8))
+  let mask = maxValue->BigInt.sub(1n)
+  num->BigInt.add(maxValue)->BigInt.Bitwise.logand(mask)
+}
+
+let fromSignedBigInt = val => {
+  let bytesLen = 32
+  let val = val >= 0n ? val : val->toTwosComplement(~bytesLen)
+  val->Viem.bigintToHex(~options={size: bytesLen})
+}
+
+type hex = EvmTypes.Hex.t
+//bytes currently does not work with genType and we also currently generate bytes as a string type
+type bytesHex = string
+let keccak256 = Viem.keccak256
+let bytesToHex = Viem.bytesToHex
+let concat = Viem.concat
+let castToHexUnsafe: 'a => hex = val => val->Utils.magic
+let fromBigInt: bigint => hex = val => val->Viem.bigintToHex(~options={size: 32})
+let fromDynamicString: string => hex = val => val->(Utils.magic: string => hex)->keccak256
+let fromString: string => hex = val => val->Viem.stringToHex(~options={size: 32})
+let fromAddress: Address.t => hex = addr => addr->(Utils.magic: Address.t => hex)->Viem.pad
+let fromDynamicBytes: bytesHex => hex = bytes => bytes->(Utils.magic: bytesHex => hex)->keccak256
+let fromBytes: bytesHex => hex = bytes =>
+  bytes->(Utils.magic: bytesHex => bytes)->Viem.bytesToHex(~options={size: 32})
+let fromBool: bool => hex = b => b->Viem.boolToHex(~options={size: 32})
package/src/Utils.res
CHANGED
@@ -1,5 +1,12 @@
 external magic: 'a => 'b = "%identity"

+let delay = milliseconds =>
+  Js.Promise2.make((~resolve, ~reject as _) => {
+    let _interval = Js.Global.setTimeout(_ => {
+      resolve()
+    }, milliseconds)
+  })
+
 module Option = {
   let mapNone = (opt: option<'a>, val: 'b): option<'b> => {
     switch opt {
@@ -224,6 +231,13 @@ external queueMicrotask: (unit => unit) => unit = "queueMicrotask"
 module Schema = {
   let enum = items => S.union(items->Belt.Array.mapU(S.literal))

+  // A hot fix until we use the version where it's supported
+  // https://github.com/DZakh/rescript-schema/blob/v8.4.0/docs/rescript-usage.md#removetypevalidation
+  let removeTypeValidationInPlace = schema => {
+    // The variables input is guaranteed to be an object, so we reset the rescript-schema type filter here
+    (schema->Obj.magic)["f"] = ()
+  }
+
   let getNonOptionalFieldNames = schema => {
     let acc = []
     switch schema->S.classify {
@@ -362,3 +376,15 @@ module WeakMap = {
   @send external has: (t<'k, 'v>, 'k) => bool = "has"
   @send external set: (t<'k, 'v>, 'k, 'v) => t<'k, 'v> = "set"
 }
+
+module Map = {
+  type t<'k, 'v> = Js.Map.t<'k, 'v>
+
+  @new external make: unit => t<'k, 'v> = "Map"
+
+  @send external get: (t<'k, 'v>, 'k) => option<'v> = "get"
+  @send external unsafeGet: (t<'k, 'v>, 'k) => 'v = "get"
+  @send external has: (t<'k, 'v>, 'k) => bool = "has"
+  @send external set: (t<'k, 'v>, 'k, 'v) => t<'k, 'v> = "set"
+  @send external delete: (t<'k, 'v>, 'k) => bool = "delete"
+}
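
The new `delay` helper wraps `setTimeout` in a promise, and the added `Map` module is a thin zero-cost binding to the native JavaScript `Map`. A brief usage sketch, assuming both are referenced through the `Utils` module as defined above (the surrounding async function is illustrative only):

```rescript
// Illustrative only — mirrors the bindings added above.
let example = async () => {
  // Resolves after roughly 500ms.
  await Utils.delay(500)

  // Native JS Map via the new externals; `set` returns the map, so calls chain.
  let cache: Utils.Map.t<string, int> = Utils.Map.make()
  cache->Utils.Map.set("latestBlock", 19_000_000)->ignore
  cache->Utils.Map.get("latestBlock")->Js.log // Some(19000000)
}
```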
package/src/bindings/BigInt.res
CHANGED
@@ -48,10 +48,22 @@ let schema =
   S.string
   ->S.setName("BigInt")
   ->S.transform(s => {
-    parser:
+    parser: string =>
       switch string->fromString {
       | Some(bigInt) => bigInt
-      | None => s.fail(
+      | None => s.fail("The string is not valid BigInt")
       },
-    serializer:
+    serializer: bigint => bigint->toString,
   })
+
+let nativeSchema: S.t<bigint> = S.custom("BigInt", s => {
+  {
+    parser: unknown => {
+      if Js.typeof(unknown) !== "bigint" {
+        s.fail("Expected bigint")
+      } else {
+        unknown->Obj.magic
+      }
+    },
+  }
+})
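
`schema` keeps round-tripping bigints through strings (suitable for JSON payloads), while the added `nativeSchema` accepts an already-native JavaScript bigint and only guards with a `typeof` check. A sketch of the difference, assuming rescript-schema's `S.parseAnyWith` helper is available; the result shapes in the comments are expectations, not recorded output:

```rescript
// Sketch only — assumes S.parseAnyWith from rescript-schema.
let a = "123"->S.parseAnyWith(BigInt.schema) // Ok(123n): parsed via fromString
let b = "abc"->S.parseAnyWith(BigInt.schema) // Error(_): "The string is not valid BigInt"
let c = 123n->S.parseAnyWith(BigInt.nativeSchema) // Ok(123n): Js.typeof is "bigint"
let d = "123"->S.parseAnyWith(BigInt.nativeSchema) // Error(_): a string is not a native bigint
```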
package/src/bindings/Ethers.gen.ts
ADDED

@@ -0,0 +1,14 @@
+/* TypeScript file generated from Ethers.res by genType. */
+
+/* eslint-disable */
+/* tslint:disable */
+
+const EthersJS = require('./Ethers.bs.js');
+
+import type {t as Address_t} from '../../src/Address.gen';
+
+export const Addresses_mockAddresses: Address_t[] = EthersJS.Addresses.mockAddresses as any;
+
+export const Addresses_defaultAddress: Address_t = EthersJS.Addresses.defaultAddress as any;
+
+export const Addresses: { mockAddresses: Address_t[]; defaultAddress: Address_t } = EthersJS.Addresses as any;