envio 2.28.0-rc.0 → 2.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +6 -6
- package/src/InternalConfig.res +10 -0
- package/src/sources/EventRouter.res +113 -0
- package/src/sources/EventRouter.res.js +125 -0
- package/src/sources/HyperSync.res +15 -7
- package/src/sources/HyperSync.res.js +48 -15
- package/src/sources/HyperSyncClient.res +4 -3
- package/src/sources/HyperSyncClient.res.js +6 -1
- package/src/sources/RpcSource.res +748 -0
- package/src/sources/RpcSource.res.js +697 -0
|
@@ -0,0 +1,748 @@
|
|
|
1
|
+
open Belt
|
|
2
|
+
open Source
|
|
3
|
+
|
|
4
|
+
exception QueryTimout(string)
|
|
5
|
+
|
|
6
|
+
/**
Loads a block that is expected to exist, by block number.

Resolves with the block, or rejects when the RPC answers `null`
(e.g. the node is lagging and doesn't know the block yet).
*/
let getKnownBlock = (provider, blockNumber) =>
  provider
  ->Ethers.JsonRpcProvider.getBlock(blockNumber)
  ->Promise.then(blockNullable =>
    switch blockNullable->Js.Nullable.toOption {
    | Some(block) => Promise.resolve(block)
    | None =>
      // Fix: `Js.Exn.raiseError` throws, so the previous `Promise.reject(...)`
      // wrapper around it was dead code. Throwing inside the `then` callback
      // rejects the resulting promise with the same JS error.
      Js.Exn.raiseError(`RPC returned null for blockNumber ${blockNumber->Belt.Int.toString}`)
    }
  )
|
|
18
|
+
|
|
19
|
+
/**
Loads a known block, retrying forever with exponential backoff on failure.

Each failed attempt logs a warning, waits `backoffMsOnFailure` milliseconds
and retries with a doubled delay. The delay is capped at 60s so a long
outage doesn't grow the wait unboundedly (nor overflow the int).
*/
let rec getKnownBlockWithBackoff = async (
  ~provider,
  ~sourceName,
  ~chain,
  ~blockNumber,
  ~backoffMsOnFailure,
) =>
  switch await getKnownBlock(provider, blockNumber) {
  | exception err =>
    Logging.warn({
      "err": err,
      "msg": `Issue while running fetching batch of events from the RPC. Will wait ${backoffMsOnFailure->Belt.Int.toString}ms and try again.`,
      "source": sourceName,
      "chainId": chain->ChainMap.Chain.toChainId,
      "type": "EXPONENTIAL_BACKOFF",
    })
    await Time.resolvePromiseAfterDelay(~delayMilliseconds=backoffMsOnFailure)
    await getKnownBlockWithBackoff(
      ~provider,
      ~sourceName,
      ~chain,
      ~blockNumber,
      // Fix: cap the exponential growth instead of doubling without a bound
      ~backoffMsOnFailure=Pervasives.min(backoffMsOnFailure * 2, 60_000),
    )
  | result => result
  }
|
|
45
|
+
/**
Tries to extract a suggested block interval from a provider error message.

Returns `Some((blockInterval, isMaxRange))` where `isMaxRange` tells whether
the interval is the hard maximum the provider allows (vs a one-off retry hint).
Returns `None` when the error doesn't look like a block-range complaint.
*/
let getSuggestedBlockIntervalFromExn = {
  // Unknown provider: "retry with the range 123-456" — a one-off hint, not a hard max
  let suggestedRangeRegExp = %re(`/retry with the range (\d+)-(\d+)/`)

  // Provider-specific error patterns, tried strictly IN ORDER (order matters:
  // e.g. evm-rpc.sei-apis.com's "block range too large (2000), maximum allowed
  // is 1000 blocks" must hit the sei pattern before Base's fixed-limit pattern).
  // The second tuple element is `None` when the max range comes from the first
  // capture group, or `Some(n)` when the provider has a known fixed limit.
  let providerRangePatterns = [
    // QuickNode, 1RPC, Blast: "limited to a 1000 blocks range"
    (%re(`/limited to a (\d+) blocks range/`), None),
    // Alchemy: "up to a 500 block range"
    (%re(`/up to a (\d+) block range/`), None),
    // Cloudflare: "Max range: 3500"
    (%re(`/Max range: (\d+)/`), None),
    // Thirdweb: "Maximum allowed number of requested blocks is 3500"
    (%re(`/Maximum allowed number of requested blocks is (\d+)/`), None),
    // BlockPI: "limited to 2000 block"
    (%re(`/limited to (\d+) block/`), None),
    // evm-rpc.sei-apis.com: "block range too large (2000), maximum allowed is 1000 blocks"
    (%re(`/maximum allowed is (\d+) blocks/`), None),
    // Base: "block range too large" - fixed 2000 block limit
    (%re(`/block range too large/`), Some(2000)),
    // Blast (paid): "exceeds the range allowed for your plan (5000 > 3000)"
    (%re(`/exceeds the range allowed for your plan \(\d+ > (\d+)\)/`), None),
    // Chainstack: "Block range limit exceeded" - 10000 block limit
    (%re(`/Block range limit exceeded./`), Some(10000)),
    // Coinbase: "please limit the query to at most 1000 blocks"
    (%re(`/please limit the query to at most (\d+) blocks/`), None),
    // PublicNode: "maximum block range: 2000"
    (%re(`/maximum block range: (\d+)/`), None),
    // Hyperliquid: "query exceeds max block range 1000"
    (%re(`/query exceeds max block range (\d+)/`), None),
  ]

  // TODO: Reproduce how the error message looks like
  // when we send request with numeric block range instead of hex
  // Infura, ZkSync: "Try with this block range [0x123,0x456]"

  // Future handling needed for these providers that don't suggest ranges:
  // - Ankr: "block range is too wide"
  // - 1RPC: "response size should not greater than 10000000 bytes"
  // - ZkEVM: "query returned more than 10000 results"
  // - LlamaRPC: "query exceeds max results"
  // - Optimism: "backend response too large" or "Block range is too large"
  // - Arbitrum: "logs matched by query exceeds limit of 10000"

  (exn): option<(
    // The suggested block range
    int,
    // Whether it's the max range that the provider allows
    bool,
  )> =>
    switch exn {
    | Js.Exn.Error(error) =>
      try {
        // Providers nest the revert payload under `error.message`; the property
        // access may throw on unexpected shapes — caught by the surrounding try.
        let message: string = (error->Obj.magic)["error"]["message"]
        message->S.assertOrThrow(S.string)

        // Extracts a positive block range from the first capture group
        let extractBlockRange = (execResult, ~isMaxRange) =>
          switch execResult->Js.Re.captures {
          | [_, Js.Nullable.Value(blockRangeLimit)] =>
            switch blockRangeLimit->Int.fromString {
            | Some(blockRangeLimit) if blockRangeLimit > 0 => Some(blockRangeLimit, isMaxRange)
            | _ => None
            }
          | _ => None
          }

        // The "retry with the range A-B" hint carries both bounds, so it's
        // handled separately: the interval is B-A+1 and it's NOT a hard max.
        switch suggestedRangeRegExp->Js.Re.exec_(message) {
        | Some(execResult) =>
          switch execResult->Js.Re.captures {
          | [_, Js.Nullable.Value(fromBlock), Js.Nullable.Value(toBlock)] =>
            switch (fromBlock->Int.fromString, toBlock->Int.fromString) {
            | (Some(fromBlock), Some(toBlock)) if toBlock >= fromBlock =>
              Some(toBlock - fromBlock + 1, false)
            | _ => None
            }
          | _ => None
          }
        | None =>
          // First matching pattern wins and stops the scan — even when its
          // capture group fails to parse (mirrors the original nested chain).
          providerRangePatterns
          ->Belt.Array.reduce(None, (found, (regExp, fixedLimit)) =>
            switch found {
            | Some(_) => found
            | None =>
              regExp
              ->Js.Re.exec_(message)
              ->Belt.Option.map(execResult =>
                switch fixedLimit {
                | Some(limit) => Some(limit, true)
                | None => extractBlockRange(execResult, ~isMaxRange=true)
                }
              )
            }
          )
          ->Belt.Option.getWithDefault(None)
        }
      } catch {
      | _ => None
      }
    | _ => None
    }
}
|
|
190
|
+
|
|
191
|
+
// Result of fetching one page of logs: the logs themselves plus the full block
// data of the page's upper bound (used for reorg detection and
// latest-fetched-block bookkeeping by the caller).
type eventBatchQuery = {
  logs: array<Ethers.log>,
  latestFetchedBlock: Ethers.JsonRpcProvider.block,
}

// Special key in the mutable suggested-block-intervals dict: when set, the
// value is a provider-wide hard maximum that takes precedence over the
// per-partition suggested intervals (see getItemsOrThrow in `make`).
let maxSuggestedBlockIntervalKey = "max"
|
|
197
|
+
|
|
198
|
+
/**
Fetches one page of logs for [fromBlock, toBlock] via eth_getLogs, racing the
request against a timeout.

On any failure (timeout or RPC error) it records a smaller suggested block
interval in `mutSuggestedBlockIntervals` and raises Source.GetItemsError so
the caller can retry:
- if the provider's error message suggests a range, retry with that exact
  toBlock (and remember it as the global max when the provider reported a
  hard limit);
- otherwise shrink the interval multiplicatively and retry with backoff.
*/
let getNextPage = (
  ~fromBlock,
  ~toBlock,
  ~addresses,
  ~topicQuery,
  ~loadBlock,
  ~syncConfig as sc: InternalConfig.sourceSync,
  ~provider,
  ~mutSuggestedBlockIntervals,
  ~partitionId,
): promise<eventBatchQuery> => {
  //If the query hangs for longer than this, reject this promise to reduce the block interval
  let queryTimoutPromise =
    Time.resolvePromiseAfterDelay(~delayMilliseconds=sc.queryTimeoutMillis)->Promise.then(() =>
      Promise.reject(
        QueryTimout(
          `Query took longer than ${Belt.Int.toString(sc.queryTimeoutMillis / 1000)} seconds`,
        ),
      )
    )

  // Kicked off in parallel with getLogs; awaited only after logs arrive.
  let latestFetchedBlockPromise = loadBlock(toBlock)
  let logsPromise =
    provider
    ->Ethers.JsonRpcProvider.getLogs(
      ~filter={
        address: ?addresses,
        topics: topicQuery,
        fromBlock,
        toBlock,
      }->Ethers.CombinedFilter.toFilter,
    )
    ->Promise.then(async logs => {
      {
        logs,
        latestFetchedBlock: await latestFetchedBlockPromise,
      }
    })

  [queryTimoutPromise, logsPromise]
  ->Promise.race
  ->Promise.catch(err => {
    switch getSuggestedBlockIntervalFromExn(err) {
    | Some((nextBlockIntervalTry, isMaxRange)) =>
      // A provider-reported hard limit applies to every partition, so it's
      // stored under the shared "max" key instead of this partition's key.
      mutSuggestedBlockIntervals->Js.Dict.set(
        isMaxRange ? maxSuggestedBlockIntervalKey : partitionId,
        nextBlockIntervalTry,
      )
      raise(
        Source.GetItemsError(
          FailedGettingItems({
            exn: err,
            attemptedToBlock: toBlock,
            retry: WithSuggestedToBlock({
              toBlock: fromBlock + nextBlockIntervalTry - 1,
            }),
          }),
        ),
      )
    | None =>
      // No hint from the provider: shrink the interval multiplicatively
      // (sc.backoffMultiplicative is presumably < 1 — confirm in config)
      let executedBlockInterval = toBlock - fromBlock + 1
      let nextBlockIntervalTry =
        (executedBlockInterval->Belt.Int.toFloat *. sc.backoffMultiplicative)->Belt.Int.fromFloat
      mutSuggestedBlockIntervals->Js.Dict.set(partitionId, nextBlockIntervalTry)
      raise(
        Source.GetItemsError(
          Source.FailedGettingItems({
            exn: err,
            attemptedToBlock: toBlock,
            retry: WithBackoff({
              message: `Failed getting data for the block range. Will try smaller block range for the next attempt.`,
              backoffMillis: sc.backoffMillis,
            }),
          }),
        ),
      )
    }
  })
}
|
|
277
|
+
|
|
278
|
+
// A single eth_getLogs request shape: optional address filter plus topics.
type logSelection = {
  // `None` means "match any address" (used for wildcard events).
  addresses: option<array<Address.t>>,
  topicQuery: Rpc.GetLogs.topicQuery,
}

// Per-selection config: builds the log selection for the currently registered
// contract addresses, or throws Source.GetItemsError for selections the RPC
// source cannot express (see getSelectionConfig).
type selectionConfig = {
  getLogSelectionOrThrow: (~addressesByContractName: dict<array<Address.t>>) => logSelection,
}
|
|
286
|
+
|
|
287
|
+
/**
Compiles a FetchState.selection into a `selectionConfig` for the RPC source.

Splits the selection's event configs into static topic selections and dynamic
event-filter functions, then supports exactly two shapes:
- a single compressed static topic selection (addresses optional), or
- a single dynamic filter for a single event (wildcard events drop the
  address filter).
Anything else raises Source.GetItemsError(UnsupportedSelection).
*/
let getSelectionConfig = (selection: FetchState.selection, ~chain) => {
  let staticTopicSelections = []
  let dynamicEventFilters = []

  selection.eventConfigs
  ->(Utils.magic: array<Internal.eventConfig> => array<Internal.evmEventConfig>)
  ->Belt.Array.forEach(({getEventFiltersOrThrow}) => {
    switch getEventFiltersOrThrow(chain) {
    | Static(s) => staticTopicSelections->Js.Array2.pushMany(s)->ignore
    | Dynamic(fn) => dynamicEventFilters->Js.Array2.push(fn)->ignore
    }
  })

  let getLogSelectionOrThrow = switch (
    staticTopicSelections->LogSelection.compressTopicSelections,
    dynamicEventFilters,
  ) {
  | ([], []) =>
    raise(
      Source.GetItemsError(
        UnsupportedSelection({
          message: "Invalid events configuration for the partition. Nothing to fetch. Please, report to the Envio team.",
        }),
      ),
    )
  | ([topicSelection], []) => {
      // One static selection: precompute the topic query once; the address
      // filter is recomputed per call from the registered addresses.
      let topicQuery = topicSelection->Rpc.GetLogs.mapTopicQuery
      (~addressesByContractName) => {
        addresses: switch addressesByContractName->FetchState.addressesByContractNameGetAll {
        | [] => None
        | addresses => Some(addresses)
        },
        topicQuery,
      }
    }
  | ([], [dynamicEventFilter]) if selection.eventConfigs->Js.Array2.length === 1 =>
    let eventConfig = selection.eventConfigs->Js.Array2.unsafe_get(0)

    (~addressesByContractName) => {
      let addresses = addressesByContractName->FetchState.addressesByContractNameGetAll
      {
        // Wildcard events match any emitter, so no address filter is sent.
        addresses: eventConfig.isWildcard ? None : Some(addresses),
        topicQuery: switch dynamicEventFilter(addresses) {
        | [topicSelection] => topicSelection->Rpc.GetLogs.mapTopicQuery
        | _ =>
          raise(
            Source.GetItemsError(
              UnsupportedSelection({
                message: "RPC data-source currently doesn't support an array of event filters. Please, create a GitHub issue if it's a blocker for you.",
              }),
            ),
          )
        },
      }
    }
  | _ =>
    raise(
      Source.GetItemsError(
        UnsupportedSelection({
          message: "RPC data-source currently supports event filters only when there's a single wildcard event. Please, create a GitHub issue if it's a blocker for you.",
        }),
      ),
    )
  }

  {
    getLogSelectionOrThrow: getLogSelectionOrThrow,
  }
}
|
|
356
|
+
|
|
357
|
+
// Memoizes getSelectionConfig per selection, keyed by object identity in a
// WeakMap so cached configs can be collected together with their selections.
let memoGetSelectionConfig = (~chain) => {
  let memo = Utils.WeakMap.make()
  selection =>
    switch memo->Utils.WeakMap.get(selection) {
    | Some(cached) => cached
    | None =>
      let computed = selection->getSelectionConfig(~chain)
      let _ = memo->Utils.WeakMap.set(selection, computed)
      computed
    }
}
|
|
369
|
+
|
|
370
|
+
// Wraps a block loader into an event-log accessor: given a decoded log,
// resolves the block the log was emitted in (by the log's blockNumber).
let makeThrowingGetEventBlock = (~getBlock) => {
  async (eventLog: Ethers.log) => await getBlock(eventLog.blockNumber)
}
|
|
375
|
+
|
|
376
|
+
/**
Builds a getter for an event's selected transaction fields.

For each transaction schema it compiles (and caches) a parse function:
- no fields selected -> empty object;
- only `transactionIndex` (and/or `hash`) -> served straight from the log,
  no extra RPC call;
- anything else -> full `getTransactionFields` RPC round-trip, then parsed.
Parse failures are rethrown as a readable JS error naming the bad field.
*/
let makeThrowingGetEventTransaction = (~getTransactionFields) => {
  // Per-schema compiled parse functions, keyed by schema object identity.
  let fnsCache = Utils.WeakMap.make()
  (log, ~transactionSchema) => {
    (
      switch fnsCache->Utils.WeakMap.get(transactionSchema) {
      | Some(fn) => fn
      // This is not super expensive, but don't want to do it on every event
      | None => {
          // Fix: keep the caller's schema for the cache key below. Previously
          // this binding shadowed ~transactionSchema, so the cache was written
          // with the derived schema but looked up with the original one and
          // therefore never hit.
          let unvalidatedSchema = transactionSchema->S.removeTypeValidation

          let transactionFieldItems = switch unvalidatedSchema->S.classify {
          | Object({items}) => items
          | _ => Js.Exn.raiseError("Unexpected internal error: transactionSchema is not an object")
          }

          let parseOrThrowReadableError = data => {
            try data->S.parseOrThrow(unvalidatedSchema) catch {
            | S.Raised(error) =>
              Js.Exn.raiseError(
                `Invalid transaction field "${error.path
                  ->S.Path.toArray
                  ->Js.Array2.joinWith(
                    ".",
                  )}" found in the RPC response. Error: ${error->S.Error.reason}`,
              ) // There should always be only one field, but just in case split them with a dot
            }
          }

          let fn = switch transactionFieldItems {
          | [] => _ => %raw(`{}`)->Promise.resolve
          | [{location: "transactionIndex"}] =>
            log => log->parseOrThrowReadableError->Promise.resolve
          | [{location: "hash"}]
          | [{location: "hash"}, {location: "transactionIndex"}]
          | [{location: "transactionIndex"}, {location: "hash"}] =>
            (log: Ethers.log) =>
              {
                "hash": log.transactionHash,
                "transactionIndex": log.transactionIndex,
              }
              ->parseOrThrowReadableError
              ->Promise.resolve
          | _ =>
            log =>
              log
              ->getTransactionFields
              ->Promise.thenResolve(parseOrThrowReadableError)
          }
          // Key by the caller-provided schema so the next lookup actually hits.
          let _ = fnsCache->Utils.WeakMap.set(transactionSchema, fn)
          fn
        }
      }
    )(log)
  }
}
|
|
431
|
+
|
|
432
|
+
// Extracts the hostname from an http(s) URL, or None when the URL doesn't
// start with http:// or https://. Used both for validation and to build a
// display name that doesn't leak API keys from the URL's path/query.
let sanitizeUrl = (url: string) => {
  // Regular expression requiring protocol and capturing hostname
  // - (https?:\/\/) : Required http:// or https://
  // - ([^\/?]+) : Capture hostname (one or more characters that aren't / or ?)
  // - .* : Match rest of the string
  let hostPattern = %re("/https?:\/\/([^\/?]+).*/")

  hostPattern
  ->Js.Re.exec_(url)
  ->Belt.Option.flatMap(result => Js.Re.captures(result)->Belt.Array.get(1))
  ->Belt.Option.flatMap(Js.Nullable.toOption)
}
|
|
448
|
+
|
|
449
|
+
// Constructor arguments for an RPC data source (see `make`).
type options = {
  // Role of this source — semantics defined by the Source module.
  sourceFor: Source.sourceFor,
  // Block-interval / timeout / backoff tuning for this source.
  syncConfig: InternalConfig.sourceSync,
  // RPC endpoint; must start with http:// or https:// (validated in `make`).
  url: string,
  chain: ChainMap.Chain.t,
  // Contract configs; used to build the contract-name -> ABI mapping for decoding.
  contracts: array<Internal.evmContractConfig>,
  // Routes a log's (sighash, topic count) to the matching event config.
  eventRouter: EventRouter.t<Internal.evmEventConfig>,
}
|
|
457
|
+
|
|
458
|
+
/**
Creates an RPC-backed Source.t.

Validates the URL, sets up an Ethers provider plus lazy block/transaction
loaders, and implements:
- `getItemsOrThrow`: fetches a page of logs with adaptive block intervals
  (AIMD: additive increase here, multiplicative decrease in getNextPage),
  decodes them with viem and resolves the selected block/transaction fields;
- `getBlockHashes`: block data for reorg detection;
- `getHeightOrThrow`: current chain height via eth_blockNumber.

Fix vs previous revision: "endipoint" -> "endpoint" in the two suggestedFix
log messages.
*/
let make = ({sourceFor, syncConfig, url, chain, contracts, eventRouter}: options): t => {
  let urlHost = switch sanitizeUrl(url) {
  | None =>
    Js.Exn.raiseError(
      `EE109: The RPC url "${url}" is incorrect format. The RPC url needs to start with either http:// or https://`,
    )
  | Some(host) => host
  }
  // Only the host goes into the name, so API keys in the path aren't logged.
  let name = `RPC (${urlHost})`

  let provider = Ethers.JsonRpcProvider.make(~rpcUrl=url, ~chainId=chain->ChainMap.Chain.toChainId)

  let getSelectionConfig = memoGetSelectionConfig(~chain)

  // Mutable per-partition suggested block intervals; the "max" key holds a
  // provider-wide hard limit (see maxSuggestedBlockIntervalKey).
  let mutSuggestedBlockIntervals = Js.Dict.empty()

  let transactionLoader = LazyLoader.make(
    ~loaderFn=transactionHash => provider->Ethers.JsonRpcProvider.getTransaction(~transactionHash),
    ~onError=(am, ~exn) => {
      Logging.error({
        "err": exn,
        "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
          ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
        "source": name,
        "chainId": chain->ChainMap.Chain.toChainId,
        "metadata": {
          {
            "asyncTaskName": "transactionLoader: fetching transaction data - `getTransaction` rpc call",
            "suggestedFix": "This likely means the RPC url you are using is not responding correctly. Please try another RPC endpoint.",
          }
        },
      })
    },
  )

  let blockLoader = LazyLoader.make(
    ~loaderFn=blockNumber =>
      getKnownBlockWithBackoff(
        ~provider,
        ~sourceName=name,
        ~chain,
        ~backoffMsOnFailure=1000,
        ~blockNumber,
      ),
    ~onError=(am, ~exn) => {
      Logging.error({
        "err": exn,
        "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
          ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
        "source": name,
        "chainId": chain->ChainMap.Chain.toChainId,
        "metadata": {
          {
            "asyncTaskName": "blockLoader: fetching block data - `getBlock` rpc call",
            "suggestedFix": "This likely means the RPC url you are using is not responding correctly. Please try another RPC endpoint.",
          }
        },
      })
    },
  )

  let getEventBlockOrThrow = makeThrowingGetEventBlock(~getBlock=blockNumber =>
    blockLoader->LazyLoader.get(blockNumber)
  )
  let getEventTransactionOrThrow = makeThrowingGetEventTransaction(
    ~getTransactionFields=Ethers.JsonRpcProvider.makeGetTransactionFields(
      ~getTransactionByHash=LazyLoader.get(transactionLoader, _),
    ),
  )

  // contract name -> ABI, consumed by Viem.parseLogOrThrow during decoding
  let contractNameAbiMapping = Js.Dict.empty()
  contracts->Belt.Array.forEach(contract => {
    contractNameAbiMapping->Js.Dict.set(contract.name, contract.abi)
  })

  let getItemsOrThrow = async (
    ~fromBlock,
    ~toBlock,
    ~addressesByContractName,
    ~indexingContracts,
    ~currentBlockHeight,
    ~partitionId,
    ~selection: FetchState.selection,
    ~retry as _,
    ~logger as _,
  ) => {
    let startFetchingBatchTimeRef = Hrtime.makeTimer()

    // A provider-wide hard max (if known) overrides the per-partition interval.
    let suggestedBlockInterval = switch mutSuggestedBlockIntervals->Utils.Dict.dangerouslyGetNonOption(
      maxSuggestedBlockIntervalKey,
    ) {
    | Some(maxSuggestedBlockInterval) => maxSuggestedBlockInterval
    | None =>
      mutSuggestedBlockIntervals
      ->Utils.Dict.dangerouslyGetNonOption(partitionId)
      ->Belt.Option.getWithDefault(syncConfig.initialBlockInterval)
    }

    // Always have a toBlock for an RPC worker
    let toBlock = switch toBlock {
    | Some(toBlock) => Pervasives.min(toBlock, currentBlockHeight)
    | None => currentBlockHeight
    }

    let suggestedToBlock = Pervasives.min(fromBlock + suggestedBlockInterval - 1, toBlock)
    //Defensively ensure we never query a target block below fromBlock
    ->Pervasives.max(fromBlock)

    // Parent of the range's first block — needed as the reorg-guard anchor.
    let firstBlockParentPromise =
      fromBlock > 0
        ? blockLoader->LazyLoader.get(fromBlock - 1)->Promise.thenResolve(res => res->Some)
        : Promise.resolve(None)

    let {getLogSelectionOrThrow} = getSelectionConfig(selection)
    let {addresses, topicQuery} = getLogSelectionOrThrow(~addressesByContractName)

    let {logs, latestFetchedBlock} = await getNextPage(
      ~fromBlock,
      ~toBlock=suggestedToBlock,
      ~addresses,
      ~topicQuery,
      ~loadBlock=blockNumber => blockLoader->LazyLoader.get(blockNumber),
      ~syncConfig,
      ~provider,
      ~mutSuggestedBlockIntervals,
      ~partitionId,
    )

    let executedBlockInterval = suggestedToBlock - fromBlock + 1

    // Increase the suggested block interval only when it was actually applied
    // and we didn't query to a hard toBlock
    // We also don't care about it when we have a hard max block interval
    if (
      executedBlockInterval >= suggestedBlockInterval &&
      !(mutSuggestedBlockIntervals->Utils.Dict.has(maxSuggestedBlockIntervalKey))
    ) {
      // Increase batch size going forward, but do not increase past a configured maximum
      // See: https://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease
      mutSuggestedBlockIntervals->Js.Dict.set(
        partitionId,
        Pervasives.min(
          executedBlockInterval + syncConfig.accelerationAdditive,
          syncConfig.intervalCeiling,
        ),
      )
    }

    let parsedQueueItems =
      await logs
      ->Belt.Array.keepMap(log => {
        let topic0 = log.topics->Js.Array2.unsafe_get(0)
        switch eventRouter->EventRouter.get(
          ~tag=EventRouter.getEvmEventId(
            ~sighash=topic0->EvmTypes.Hex.toString,
            ~topicCount=log.topics->Array.length,
          ),
          ~indexingContracts,
          ~contractAddress=log.address,
          ~blockNumber=log.blockNumber,
        ) {
        | None => None //ignore events that aren't registered
        | Some(eventConfig) =>
          let blockNumber = log.blockNumber
          let logIndex = log.logIndex
          Some(
            (
              async () => {
                let (block, transaction) = try await Promise.all2((
                  log->getEventBlockOrThrow,
                  log->getEventTransactionOrThrow(~transactionSchema=eventConfig.transactionSchema),
                )) catch {
                // Promise.catch won't work here, because the error
                // might be thrown before a microtask is created
                | exn =>
                  raise(
                    Source.GetItemsError(
                      FailedGettingFieldSelection({
                        message: "Failed getting selected fields. Please double-check your RPC provider returns correct data.",
                        exn,
                        blockNumber,
                        logIndex,
                      }),
                    ),
                  )
                }

                let decodedEvent = try contractNameAbiMapping->Viem.parseLogOrThrow(
                  ~contractName=eventConfig.contractName,
                  ~topics=log.topics,
                  ~data=log.data,
                ) catch {
                | exn =>
                  raise(
                    Source.GetItemsError(
                      FailedParsingItems({
                        message: "Failed to parse event with viem, please double-check your ABI.",
                        exn,
                        blockNumber,
                        logIndex,
                      }),
                    ),
                  )
                }

                (
                  {
                    eventConfig: (eventConfig :> Internal.eventConfig),
                    timestamp: block.timestamp,
                    blockNumber: block.number,
                    chain,
                    logIndex: log.logIndex,
                    event: {
                      chainId: chain->ChainMap.Chain.toChainId,
                      params: decodedEvent.args,
                      transaction,
                      // Unreliably expect that the Ethers block fields match the types in HyperIndex
                      // I assume this is wrong in some cases, so we need to fix it in the future
                      block: block->(
                        Utils.magic: Ethers.JsonRpcProvider.block => Internal.eventBlock
                      ),
                      srcAddress: log.address,
                      logIndex: log.logIndex,
                    }->Internal.fromGenericEvent,
                  }: Internal.eventItem
                )
              }
            )(),
          )
        }
      })
      ->Promise.all

    let optFirstBlockParent = await firstBlockParentPromise

    let totalTimeElapsed =
      startFetchingBatchTimeRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis

    let reorgGuard: ReorgDetection.reorgGuard = {
      prevRangeLastBlock: optFirstBlockParent->Option.map(b => {
        ReorgDetection.blockNumber: b.number,
        blockHash: b.hash,
      }),
      rangeLastBlock: {
        blockNumber: latestFetchedBlock.number,
        blockHash: latestFetchedBlock.hash,
      },
    }

    {
      latestFetchedBlockTimestamp: latestFetchedBlock.timestamp,
      latestFetchedBlockNumber: latestFetchedBlock.number,
      parsedQueueItems,
      stats: {
        totalTimeElapsed: totalTimeElapsed,
      },
      currentBlockHeight,
      reorgGuard,
      fromBlockQueried: fromBlock,
    }
  }

  let getBlockHashes = (~blockNumbers, ~logger as _currentlyUnusedLogger) => {
    blockNumbers
    ->Array.map(blockNum => blockLoader->LazyLoader.get(blockNum))
    ->Promise.all
    ->Promise.thenResolve(blocks => {
      blocks
      ->Array.map((b): ReorgDetection.blockDataWithTimestamp => {
        blockNumber: b.number,
        blockHash: b.hash,
        blockTimestamp: b.timestamp,
      })
      ->Ok
    })
    ->Promise.catch(exn => exn->Error->Promise.resolve)
  }

  let client = Rest.client(url)

  {
    name,
    sourceFor,
    chain,
    poweredByHyperSync: false,
    pollingInterval: 1000,
    getBlockHashes,
    getHeightOrThrow: () => Rpc.GetBlockHeight.route->Rest.fetch((), ~client),
    getItemsOrThrow,
  }
}
|