envio 2.31.1-rc.0 → 2.31.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/ReorgDetection.res +10 -32
- package/src/ReorgDetection.res.js +13 -28
- package/src/sources/HyperFuelSource.res +3 -7
- package/src/sources/HyperFuelSource.res.js +2 -3
- package/src/sources/HyperSyncSource.res +3 -7
- package/src/sources/HyperSyncSource.res.js +2 -3
- package/src/sources/RpcSource.res +33 -27
- package/src/sources/RpcSource.res.js +33 -23
- package/src/sources/Source.res +1 -1
- package/src/sources/SourceManager.res +50 -11
- package/src/sources/SourceManager.res.js +91 -57
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.31.1-rc.0",
+  "version": "v2.31.1",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.31.1-rc.0",
-    "envio-linux-arm64": "v2.31.1-rc.0",
-    "envio-darwin-x64": "v2.31.1-rc.0",
-    "envio-darwin-arm64": "v2.31.1-rc.0"
+    "envio-linux-x64": "v2.31.1",
+    "envio-linux-arm64": "v2.31.1",
+    "envio-darwin-x64": "v2.31.1",
+    "envio-darwin-arm64": "v2.31.1"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.6",
package/src/ReorgDetection.res
CHANGED
@@ -51,18 +51,12 @@ type t = {
   // A hash map of recent blockdata by block number to make comparison checks
   // for reorgs.
   dataByBlockNumber: dict<blockData>,
-  // The latest block which detected a reorg
-  // and should never be valid.
-  // We keep track of this to avoid responses
-  // with the stale data from other data-source instances.
-  detectedReorgBlock: option<blockData>,
 }

 let make = (
   ~chainReorgCheckpoints: array<Internal.reorgCheckpoint>,
   ~maxReorgDepth,
   ~shouldRollbackOnReorg,
-  ~detectedReorgBlock=?,
 ) => {
   let dataByBlockNumber = Js.Dict.empty()

@@ -80,7 +74,6 @@ let make = (
     shouldRollbackOnReorg,
     maxReorgDepth,
     dataByBlockNumber,
-    detectedReorgBlock,
   }
 }

@@ -149,10 +142,7 @@ let registerReorgGuard = (
   switch maybeReorgDetected {
   | Some(reorgDetected) => (
       shouldRollbackOnReorg
-        ?
-            ...self,
-            detectedReorgBlock: Some(reorgDetected.scannedBlock),
-          }
+        ? self
         : make(~chainReorgCheckpoints=[], ~maxReorgDepth, ~shouldRollbackOnReorg),
       ReorgDetected(reorgDetected),
     )
@@ -174,7 +164,6 @@ let registerReorgGuard = (
     {
       maxReorgDepth,
       dataByBlockNumber: dataByBlockNumberCopyInThreshold,
-      detectedReorgBlock: None,
       shouldRollbackOnReorg,
     },
     NoReorg,
@@ -188,43 +177,33 @@ Returns the latest block number which matches block number and hashes in the pro
 If it doesn't exist in the reorg threshold it returns NotFound
 */
 let getLatestValidScannedBlock = (
-
+  reorgDetection: t,
   ~blockNumbersAndHashes: array<blockDataWithTimestamp>,
-  ~currentBlockHeight,
 ) => {
   let verifiedDataByBlockNumber = Js.Dict.empty()
   for idx in 0 to blockNumbersAndHashes->Array.length - 1 {
     let blockData = blockNumbersAndHashes->Array.getUnsafe(idx)
     verifiedDataByBlockNumber->Js.Dict.set(blockData.blockNumber->Int.toString, blockData)
   }
-
-  let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
   // Js engine automatically orders numeric object keys
-  let ascBlockNumberKeys =
+  let ascBlockNumberKeys = verifiedDataByBlockNumber->Js.Dict.keys

   let getPrevScannedBlockNumber = idx =>
     ascBlockNumberKeys
     ->Belt.Array.get(idx - 1)
-    ->Option.
-
-      switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key) {
-      | Some(v) => Some(v.blockNumber)
-      | None => None
-      }
+    ->Option.map(key => {
+      (verifiedDataByBlockNumber->Js.Dict.unsafeGet(key)).blockNumber
     })

   let rec loop = idx => {
     switch ascBlockNumberKeys->Belt.Array.get(idx) {
     | Some(blockNumberKey) =>
-
-
-
-
-        `Unexpected case. Couldn't find verified hash for block number ${blockNumberKey}`,
-      )
-    | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash =>
+      switch reorgDetection.dataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) {
+      | Some(scannedBlock)
+        if (verifiedDataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)).blockHash ===
+        scannedBlock.blockHash =>
         loop(idx + 1)
-
+      | _ => getPrevScannedBlockNumber(idx)
       }
     | None => getPrevScannedBlockNumber(idx)
     }
@@ -265,7 +244,6 @@ let rollbackToValidBlockNumber = (
   {
     maxReorgDepth,
     dataByBlockNumber: newDataByBlockNumber,
-    detectedReorgBlock: None,
     shouldRollbackOnReorg,
   }
 }
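The rewrite above drops the `detectedReorgBlock` field and makes `getLatestValidScannedBlock` walk the caller-provided hashes instead of its own in-threshold copy. Below is a minimal TypeScript sketch of that comparison, using simplified stand-ins for the package's `blockData` records (an illustration, not the package's actual API):

```typescript
// Simplified stand-in for ReorgDetection's blockData.
type BlockData = { blockNumber: number; blockHash: string };

// Walk the freshly verified blocks in ascending block-number order and compare each
// hash against the block previously scanned at the same height. On the first missing
// or mismatching scanned block, return the previous verified block number; if every
// hash matches, return the last verified block number.
function latestValidScannedBlock(
  scannedByNumber: Record<string, BlockData | undefined>,
  verified: BlockData[],
): number | undefined {
  const ascending = [...verified].sort((a, b) => a.blockNumber - b.blockNumber);
  for (let idx = 0; idx < ascending.length; idx++) {
    const scanned = scannedByNumber[String(ascending[idx].blockNumber)];
    if (scanned === undefined || scanned.blockHash !== ascending[idx].blockHash) {
      return idx > 0 ? ascending[idx - 1].blockNumber : undefined;
    }
  }
  return ascending.length > 0 ? ascending[ascending.length - 1].blockNumber : undefined;
}
```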
package/src/ReorgDetection.res.js
CHANGED
@@ -1,7 +1,6 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 'use strict';

-var Js_exn = require("rescript/lib/js/js_exn.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Belt_Option = require("rescript/lib/js/belt_Option.js");

@@ -17,7 +16,7 @@ function reorgDetectedToLogParams(reorgDetected, shouldRollbackOnReorg) {
   };
 }

-function make(chainReorgCheckpoints, maxReorgDepth, shouldRollbackOnReorg
+function make(chainReorgCheckpoints, maxReorgDepth, shouldRollbackOnReorg) {
   var dataByBlockNumber = {};
   Belt_Array.forEach(chainReorgCheckpoints, (function (block) {
     dataByBlockNumber[block.block_number] = {
@@ -28,8 +27,7 @@ function make(chainReorgCheckpoints, maxReorgDepth, shouldRollbackOnReorg, detec
   return {
     shouldRollbackOnReorg: shouldRollbackOnReorg,
     maxReorgDepth: maxReorgDepth,
-    dataByBlockNumber: dataByBlockNumber
-    detectedReorgBlock: detectedReorgBlock
+    dataByBlockNumber: dataByBlockNumber
   };
 }

@@ -80,12 +78,7 @@ function registerReorgGuard(self, reorgGuard, currentBlockHeight) {
   }
   if (maybeReorgDetected !== undefined) {
     return [
-      shouldRollbackOnReorg ? (
-        shouldRollbackOnReorg: self.shouldRollbackOnReorg,
-        maxReorgDepth: self.maxReorgDepth,
-        dataByBlockNumber: self.dataByBlockNumber,
-        detectedReorgBlock: maybeReorgDetected.scannedBlock
-      }) : make([], maxReorgDepth, shouldRollbackOnReorg, undefined),
+      shouldRollbackOnReorg ? self : make([], maxReorgDepth, shouldRollbackOnReorg),
       {
         TAG: "ReorgDetected",
         _0: maybeReorgDetected
@@ -100,29 +93,23 @@ function registerReorgGuard(self, reorgGuard, currentBlockHeight) {
     {
       shouldRollbackOnReorg: shouldRollbackOnReorg,
       maxReorgDepth: maxReorgDepth,
-      dataByBlockNumber: dataByBlockNumberCopyInThreshold
-      detectedReorgBlock: undefined
+      dataByBlockNumber: dataByBlockNumberCopyInThreshold
     },
     "NoReorg"
   ];
   }
 }

-function getLatestValidScannedBlock(
+function getLatestValidScannedBlock(reorgDetection, blockNumbersAndHashes) {
   var verifiedDataByBlockNumber = {};
   for(var idx = 0 ,idx_finish = blockNumbersAndHashes.length; idx < idx_finish; ++idx){
     var blockData = blockNumbersAndHashes[idx];
     verifiedDataByBlockNumber[String(blockData.blockNumber)] = blockData;
   }
-  var
-  var ascBlockNumberKeys = Object.keys(dataByBlockNumber);
+  var ascBlockNumberKeys = Object.keys(verifiedDataByBlockNumber);
   var getPrevScannedBlockNumber = function (idx) {
-    return Belt_Option.
-
-      if (v !== undefined) {
-        return v.blockNumber;
-      }
-
+    return Belt_Option.map(Belt_Array.get(ascBlockNumberKeys, idx - 1 | 0), (function (key) {
+      return verifiedDataByBlockNumber[key].blockNumber;
     }));
   };
   var _idx = 0;
@@ -132,12 +119,11 @@ function getLatestValidScannedBlock(self, blockNumbersAndHashes, currentBlockHei
     if (blockNumberKey === undefined) {
       return getPrevScannedBlockNumber(idx$1);
     }
-    var scannedBlock = dataByBlockNumber[blockNumberKey];
-
-
-    return Js_exn.raiseError("Unexpected case. Couldn't find verified hash for block number " + blockNumberKey);
+    var scannedBlock = reorgDetection.dataByBlockNumber[blockNumberKey];
+    if (scannedBlock === undefined) {
+      return getPrevScannedBlockNumber(idx$1);
     }
-    if (
+    if (verifiedDataByBlockNumber[blockNumberKey].blockHash !== scannedBlock.blockHash) {
       return getPrevScannedBlockNumber(idx$1);
     }
     _idx = idx$1 + 1 | 0;
@@ -170,8 +156,7 @@ function rollbackToValidBlockNumber(param, blockNumber) {
   return {
     shouldRollbackOnReorg: param.shouldRollbackOnReorg,
     maxReorgDepth: param.maxReorgDepth,
-    dataByBlockNumber: newDataByBlockNumber
-    detectedReorgBlock: undefined
+    dataByBlockNumber: newDataByBlockNumber
   };
 }

package/src/sources/HyperFuelSource.res
CHANGED
@@ -258,14 +258,10 @@ let make = ({chain, endpointUrl}: options): t => {
         backoffMillis,
       })
     | UnexpectedMissingParams({missingParams}) =>
-
-        message: `
-          ",",
+      ImpossibleForTheQuery({
+        message: `Source returned invalid data with missing required fields: ${missingParams->Js.Array2.joinWith(
+          ", ",
         )}`,
-        backoffMillis: switch retry {
-        | 0 => 1000
-        | _ => 4000 * retry
-        },
       })
     },
   }),
package/src/sources/HyperFuelSource.res.js
CHANGED
@@ -213,9 +213,8 @@ function make(param) {
   };
   } else {
     tmp = {
-      TAG: "
-      message: "
-      backoffMillis: retry !== 0 ? Math.imul(4000, retry) : 1000
+      TAG: "ImpossibleForTheQuery",
+      message: "Source returned invalid data with missing required fields: " + error$1.missingParams.join(", ")
     };
   }
   throw {
package/src/sources/HyperSyncSource.res
CHANGED
@@ -288,14 +288,10 @@ let make = (
         backoffMillis,
       })
     | UnexpectedMissingParams({missingParams}) =>
-
-        message: `
-          ",",
+      ImpossibleForTheQuery({
+        message: `Source returned invalid data with missing required fields: ${missingParams->Js.Array2.joinWith(
+          ", ",
         )}`,
-        backoffMillis: switch retry {
-        | 0 => 1000
-        | _ => 4000 * retry
-        },
       })
     },
   }),
package/src/sources/HyperSyncSource.res.js
CHANGED
@@ -214,9 +214,8 @@ function make(param) {
   };
   } else {
     tmp = {
-      TAG: "
-      message: "
-      backoffMillis: retry !== 0 ? Math.imul(4000, retry) : 1000
+      TAG: "ImpossibleForTheQuery",
+      message: "Source returned invalid data with missing required fields: " + error$1.missingParams.join(", ")
     };
   }
   throw {
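In both the HyperSync and HyperFuel sources, a response with missing required fields is no longer retried on a backoff schedule (1000 ms, then 4000 ms × retry); it is reported as impossible for the query. A small TypeScript sketch of the new classification, with a simplified tag shape mirroring the generated .res.js output (illustration only, not the package's API):

```typescript
// Sketch only: mirrors how the generated code now tags a missing-fields failure.
function classifyMissingParams(missingParams: string[]): {
  TAG: "ImpossibleForTheQuery";
  message: string;
} {
  return {
    TAG: "ImpossibleForTheQuery",
    message:
      "Source returned invalid data with missing required fields: " +
      missingParams.join(", "),
  };
}

// Example: classifyMissingParams(["blockHash", "timestamp"]).message
```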
package/src/sources/RpcSource.res
CHANGED
@@ -503,24 +503,26 @@ let make = (

   let mutSuggestedBlockIntervals = Js.Dict.empty()

-  let
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  let makeTransactionLoader = () =>
+    LazyLoader.make(
+      ~loaderFn=transactionHash =>
+        provider->Ethers.JsonRpcProvider.getTransaction(~transactionHash),
+      ~onError=(am, ~exn) => {
+        Logging.error({
+          "err": exn->Utils.prettifyExn,
+          "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
+            ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
+          "source": name,
+          "chainId": chain->ChainMap.Chain.toChainId,
+          "metadata": {
+            {
+              "asyncTaskName": "transactionLoader: fetching transaction data - `getTransaction` rpc call",
+              "suggestedFix": "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint.",
+            }
+          },
+        })
+      },
+    )

   let makeBlockLoader = () =>
     LazyLoader.make(
@@ -551,13 +553,14 @@ let make = (
   )

   let blockLoader = ref(makeBlockLoader())
+  let transactionLoader = ref(makeTransactionLoader())

   let getEventBlockOrThrow = makeThrowingGetEventBlock(~getBlock=blockNumber =>
     blockLoader.contents->LazyLoader.get(blockNumber)
   )
   let getEventTransactionOrThrow = makeThrowingGetEventTransaction(
     ~getTransactionFields=Ethers.JsonRpcProvider.makeGetTransactionFields(
-      ~getTransactionByHash=LazyLoader.get(transactionLoader, _),
+      ~getTransactionByHash=LazyLoader.get(transactionLoader.contents, _),
       ~lowercaseAddresses,
     ),
   )
@@ -675,11 +678,12 @@ let make = (
     | exn =>
       raise(
         Source.GetItemsError(
-
-            message: "Failed to parse events using hypersync client decoder. Please double-check your ABI.",
+          FailedGettingItems({
             exn,
-
-
+            attemptedToBlock: toBlock,
+            retry: ImpossibleForTheQuery({
+              message: "Failed to parse events using hypersync client decoder. Please double-check your ABI.",
+            }),
           }),
         ),
       )
@@ -809,11 +813,12 @@ let make = (
     | exn =>
       raise(
         Source.GetItemsError(
-
-            message: "Failed to parse event with viem, please double-check your ABI.",
+          FailedGettingItems({
             exn,
-
-
+            attemptedToBlock: toBlock,
+            retry: ImpossibleForTheQuery({
+              message: `Failed to parse event with viem, please double-check your ABI. Block number: ${blockNumber->Int.toString}, log index: ${logIndex->Int.toString}`,
+            }),
           }),
         ),
       )
@@ -880,6 +885,7 @@ let make = (
     // This is important, since we call this
     // function when a reorg is detected
     blockLoader := makeBlockLoader()
+    transactionLoader := makeTransactionLoader()

     blockNumbers
     ->Array.map(blockNum => blockLoader.contents->LazyLoader.get(blockNum))
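RpcSource now wraps the transaction loader in a `makeTransactionLoader` factory and rebuilds it alongside the block loader inside `getBlockHashes`, so a detected reorg also invalidates cached transactions. A rough TypeScript sketch of that reset pattern, using a hypothetical `Loader` shape rather than the package's `LazyLoader`:

```typescript
// Hypothetical cache shape standing in for LazyLoader instances.
interface Loader<T> { get(key: number | string): Promise<T> }

function makeResettableLoaders<B, T>(
  makeBlockLoader: () => Loader<B>,
  makeTransactionLoader: () => Loader<T>,
) {
  let blockLoader = makeBlockLoader();
  let transactionLoader = makeTransactionLoader();
  return {
    getBlock: (n: number) => blockLoader.get(n),
    getTransaction: (hash: string) => transactionLoader.get(hash),
    // Called when a reorg is detected: both caches are rebuilt so stale
    // block and transaction data cannot be served afterwards.
    resetOnReorg() {
      blockLoader = makeBlockLoader();
      transactionLoader = makeTransactionLoader();
    },
  };
}
```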
package/src/sources/RpcSource.res.js
CHANGED
@@ -519,20 +519,22 @@ function make(param) {
   var provider = Ethers.JsonRpcProvider.make(url, chain);
   var getSelectionConfig = memoGetSelectionConfig(chain);
   var mutSuggestedBlockIntervals = {};
-  var
-
-
-
-
-
-
-
-
-
-
-
-
-
+  var makeTransactionLoader = function () {
+    return LazyLoader.make((function (transactionHash) {
+      return provider.getTransaction(transactionHash);
+    }), (function (am, exn) {
+      Logging.error({
+        err: Utils.prettifyExn(exn),
+        msg: "EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in " + String(am._retryDelayMillis / 1000 | 0) + " seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the \"suggestedFix\" in the metadata of this command",
+        source: name,
+        chainId: chain,
+        metadata: {
+          asyncTaskName: "transactionLoader: fetching transaction data - `getTransaction` rpc call",
+          suggestedFix: "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint."
+        }
+      });
+    }), undefined, undefined, undefined, undefined);
+  };
   var makeBlockLoader = function () {
     return LazyLoader.make((function (blockNumber) {
       return getKnownBlockWithBackoff(provider, name, chain, blockNumber, 1000, lowercaseAddresses);
@@ -552,11 +554,14 @@ function make(param) {
   var blockLoader = {
     contents: makeBlockLoader()
   };
+  var transactionLoader = {
+    contents: makeTransactionLoader()
+  };
   var getEventBlockOrThrow = makeThrowingGetEventBlock(function (blockNumber) {
     return LazyLoader.get(blockLoader.contents, blockNumber);
   });
   var getEventTransactionOrThrow = makeThrowingGetEventTransaction(Ethers.JsonRpcProvider.makeGetTransactionFields((function (__x) {
-    return LazyLoader.get(transactionLoader, __x);
+    return LazyLoader.get(transactionLoader.contents, __x);
   }), lowercaseAddresses));
   var contractNameAbiMapping = {};
   Belt_Array.forEach(param.contracts, (function (contract) {
@@ -632,11 +637,13 @@ function make(param) {
   throw {
     RE_EXN_ID: Source.GetItemsError,
     _1: {
-      TAG: "
+      TAG: "FailedGettingItems",
       exn: exn,
-
-
-
+      attemptedToBlock: toBlock$1,
+      retry: {
+        TAG: "ImpossibleForTheQuery",
+        message: "Failed to parse events using hypersync client decoder. Please double-check your ABI."
+      }
     },
     Error: new Error()
   };
@@ -732,11 +739,13 @@ function make(param) {
   throw {
     RE_EXN_ID: Source.GetItemsError,
     _1: {
-      TAG: "
+      TAG: "FailedGettingItems",
       exn: exn$1,
-
-
-
+      attemptedToBlock: toBlock$1,
+      retry: {
+        TAG: "ImpossibleForTheQuery",
+        message: "Failed to parse event with viem, please double-check your ABI. Block number: " + String(blockNumber) + ", log index: " + String(logIndex)
+      }
     },
     Error: new Error()
   };
@@ -790,6 +799,7 @@ function make(param) {
   };
   var getBlockHashes = function (blockNumbers, _currentlyUnusedLogger) {
     blockLoader.contents = makeBlockLoader();
+    transactionLoader.contents = makeTransactionLoader();
     return $$Promise.$$catch(Promise.all(Belt_Array.map(blockNumbers, (function (blockNum) {
       return LazyLoader.get(blockLoader.contents, blockNum);
     }))).then(function (blocks) {
package/src/sources/Source.res
CHANGED
@@ -23,11 +23,11 @@ type blockRangeFetchResponse = {
 type getItemsRetry =
   | WithSuggestedToBlock({toBlock: int})
   | WithBackoff({message: string, backoffMillis: int})
+  | ImpossibleForTheQuery({message: string})

 type getItemsError =
   | UnsupportedSelection({message: string})
   | FailedGettingFieldSelection({exn: exn, blockNumber: int, logIndex: int, message: string})
-  | FailedParsingItems({exn: exn, blockNumber: int, logIndex: int, message: string})
   | FailedGettingItems({exn: exn, attemptedToBlock: int, retry: getItemsRetry})

 exception GetItemsError(getItemsError)
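For readers following the generated .res.js output, the updated variants can be pictured as the TypeScript union below (an illustration, not the package's published typings): `ImpossibleForTheQuery` joins the retry options, and the standalone `FailedParsingItems` error is gone — parse failures now surface as `FailedGettingItems` carrying an `ImpossibleForTheQuery` retry.

```typescript
// Illustration only; tag names follow the ReScript variants above.
type GetItemsRetry =
  | { TAG: "WithSuggestedToBlock"; toBlock: number }
  | { TAG: "WithBackoff"; message: string; backoffMillis: number }
  | { TAG: "ImpossibleForTheQuery"; message: string };

type GetItemsError =
  | { TAG: "UnsupportedSelection"; message: string }
  | { TAG: "FailedGettingFieldSelection"; exn: unknown; blockNumber: number; logIndex: number; message: string }
  | { TAG: "FailedGettingItems"; exn: unknown; attemptedToBlock: number; retry: GetItemsRetry };
```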
package/src/sources/SourceManager.res
CHANGED
@@ -304,6 +304,7 @@ let getNextSyncSource = (
   sourceManager,
   // This is needed to include the Fallback source to rotation
   ~initialSource,
+  ~currentSource,
   // After multiple failures start returning fallback sources as well
   // But don't try it when main sync sources fail because of invalid configuration
   // note: The logic might be changed in the future
@@ -315,7 +316,7 @@ let getNextSyncSource = (
   let hasActive = ref(false)

   sourceManager.sources->Utils.Set.forEach(source => {
-    if source ===
+    if source === currentSource {
       hasActive := true
     } else if (
       switch source.sourceFor {
@@ -332,7 +333,7 @@ let getNextSyncSource = (
   | None =>
     switch before->Array.get(0) {
     | Some(s) => s
-    | None =>
+    | None => currentSource
     }
   }
 }
@@ -349,9 +350,11 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
   let responseRef = ref(None)
   let retryRef = ref(0)
   let initialSource = sourceManager.activeSource
+  let sourceRef = ref(initialSource)
+  let shouldUpdateActiveSource = ref(false)

   while responseRef.contents->Option.isNone {
-    let source =
+    let source = sourceRef.contents
     let toBlock = toBlockRef.contents
     let retry = retryRef.contents

@@ -391,9 +394,8 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
     | Source.GetItemsError(error) =>
       switch error {
       | UnsupportedSelection(_)
-      | FailedGettingFieldSelection(_)
-
-        let nextSource = sourceManager->getNextSyncSource(~initialSource)
+      | FailedGettingFieldSelection(_) => {
+          let nextSource = sourceManager->getNextSyncSource(~initialSource, ~currentSource=source)

           // These errors are impossible to recover, so we delete the source
           // from sourceManager so it's not attempted anymore
@@ -404,8 +406,7 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
           if notAlreadyDeleted {
             switch error {
             | UnsupportedSelection({message}) => logger->Logging.childError(message)
-            | FailedGettingFieldSelection({exn, message, blockNumber, logIndex})
-            | FailedParsingItems({exn, message, blockNumber, logIndex}) =>
+            | FailedGettingFieldSelection({exn, message, blockNumber, logIndex}) =>
               logger->Logging.childError({
                 "msg": message,
                 "err": exn->Utils.prettifyExn,
@@ -426,7 +427,8 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
              "msg": "Switching to another data-source",
              "source": nextSource.name,
            })
-
+            sourceRef := nextSource
+            shouldUpdateActiveSource := true
            retryRef := 0
          }
        }
@@ -438,6 +440,33 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
        })
        toBlockRef := Some(toBlock)
        retryRef := 0
+      | FailedGettingItems({exn, attemptedToBlock, retry: ImpossibleForTheQuery({message})}) =>
+        let nextSource =
+          sourceManager->getNextSyncSource(
+            ~initialSource,
+            ~currentSource=source,
+            ~attemptFallbacks=true,
+          )
+
+        let hasAnotherSource = nextSource !== initialSource
+
+        logger->Logging.childWarn({
+          "msg": message ++ (hasAnotherSource ? " - Attempting to another source" : ""),
+          "toBlock": attemptedToBlock,
+          "err": exn->Utils.prettifyExn,
+        })
+
+        if !hasAnotherSource {
+          %raw(`null`)->ErrorHandling.mkLogAndRaise(
+            ~logger,
+            ~msg="The indexer doesn't have data-sources which can continue fetching. Please, check the error logs or reach out to the Envio team.",
+          )
+        } else {
+          sourceRef := nextSource
+          shouldUpdateActiveSource := false
+          retryRef := 0
+        }
+
      | FailedGettingItems({exn, attemptedToBlock, retry: WithBackoff({message, backoffMillis})}) =>
        // Starting from the 11th failure (retry=10)
        // include fallback sources for switch
@@ -454,7 +483,11 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
          | _ =>
            // Then try to switch every second failure
            if retry->mod(2) === 0 {
-              sourceManager->getNextSyncSource(
+              sourceManager->getNextSyncSource(
+                ~initialSource,
+                ~attemptFallbacks,
+                ~currentSource=source,
+              )
            } else {
              source
            }
@@ -476,16 +509,22 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
            "msg": "Switching to another data-source",
            "source": nextSource.name,
          })
-
+          sourceRef := nextSource
+          shouldUpdateActiveSource := true
        } else {
          await Utils.delay(Pervasives.min(backoffMillis, 60_000))
        }
        retryRef := retryRef.contents + 1
      }
+
      // TODO: Handle more error cases and hang/retry instead of throwing
      | exn => exn->ErrorHandling.mkLogAndRaise(~logger, ~msg="Failed to fetch block Range")
      }
  }

+  if shouldUpdateActiveSource.contents {
+    sourceManager.activeSource = sourceRef.contents
+  }
+
  responseRef.contents->Option.getUnsafe
 }
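`executeQuery` now threads the source being queried through `sourceRef`, passes it to `getNextSyncSource` as `~currentSource`, and only commits a switch back to `activeSource` when `shouldUpdateActiveSource` is set (backoff or unrecoverable errors), not when a single query was merely `ImpossibleForTheQuery`. A simplified TypeScript model of that bookkeeping, with hypothetical stand-ins for the real query loop:

```typescript
interface Source { name: string }

type AttemptResult = "ok" | "backoff" | "impossible-for-query";

// Sketch of the rotation bookkeeping: the loop retries on its own sourceRef and
// only promotes the switch to the manager's activeSource for backoff-style failures.
async function runWithRotation(
  manager: { activeSource: Source },
  nextSyncSource: (current: Source) => Source,
  attempt: (source: Source) => Promise<AttemptResult>,
): Promise<void> {
  let sourceRef = manager.activeSource;
  let shouldUpdateActiveSource = false;
  for (;;) {
    const result = await attempt(sourceRef);
    if (result === "ok") break;
    sourceRef = nextSyncSource(sourceRef);
    shouldUpdateActiveSource = result === "backoff";
  }
  if (shouldUpdateActiveSource) {
    manager.activeSource = sourceRef;
  }
}
```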
package/src/sources/SourceManager.res.js
CHANGED
@@ -203,7 +203,7 @@ async function waitForNewBlock(sourceManager, currentBlockHeight) {
   return newBlockHeight;
 }

-function getNextSyncSource(sourceManager, initialSource, attemptFallbacksOpt) {
+function getNextSyncSource(sourceManager, initialSource, currentSource, attemptFallbacksOpt) {
   var attemptFallbacks = attemptFallbacksOpt !== undefined ? attemptFallbacksOpt : false;
   var before = [];
   var after = [];
@@ -211,7 +211,7 @@ function getNextSyncSource(sourceManager, initialSource, attemptFallbacksOpt) {
     contents: false
   };
   sourceManager.sources.forEach(function (source) {
-    if (source ===
+    if (source === currentSource) {
       hasActive.contents = true;
       return ;
     }
@@ -234,7 +234,7 @@ function getNextSyncSource(sourceManager, initialSource, attemptFallbacksOpt) {
   if (s$1 !== undefined) {
     return s$1;
   } else {
-    return
+    return currentSource;
   }
 }

@@ -247,8 +247,10 @@ async function executeQuery(sourceManager, query, currentBlockHeight) {
   var responseRef;
   var retryRef = 0;
   var initialSource = sourceManager.activeSource;
+  var sourceRef = initialSource;
+  var shouldUpdateActiveSource = false;
   while(Belt_Option.isNone(responseRef)) {
-    var source =
+    var source = sourceRef;
     var toBlock = toBlockRef;
     var retry = retryRef;
     var logger = Logging.createChild({
@@ -275,85 +277,117 @@ async function executeQuery(sourceManager, query, currentBlockHeight) {
       var error = Caml_js_exceptions.internalToOCamlException(raw_error);
       if (error.RE_EXN_ID === Source.GetItemsError) {
         var error$1 = error._1;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        var exit = 0;
+        switch (error$1.TAG) {
+          case "UnsupportedSelection" :
+          case "FailedGettingFieldSelection" :
+              exit = 1;
+              break;
+          case "FailedGettingItems" :
+              var match$1 = error$1.retry;
+              var attemptedToBlock = error$1.attemptedToBlock;
+              var exn = error$1.exn;
+              switch (match$1.TAG) {
+                case "WithSuggestedToBlock" :
+                    var toBlock$1 = match$1.toBlock;
+                    Logging.childTrace(logger, {
+                          msg: "Failed getting data for the block range. Immediately retrying with the suggested block range from response.",
+                          toBlock: attemptedToBlock,
+                          suggestedToBlock: toBlock$1
+                        });
+                    toBlockRef = toBlock$1;
+                    retryRef = 0;
+                    break;
+                case "WithBackoff" :
+                    var backoffMillis = match$1.backoffMillis;
+                    var attemptFallbacks = retry >= 10;
+                    var nextSource = !(retry === 0 || retry === 1) && retry % 2 === 0 ? getNextSyncSource(sourceManager, initialSource, source, attemptFallbacks) : source;
+                    var log = retry >= 4 ? Logging.childWarn : Logging.childTrace;
+                    log(logger, {
+                          msg: match$1.message,
+                          toBlock: attemptedToBlock,
+                          backOffMilliseconds: backoffMillis,
+                          retry: retry,
+                          err: Utils.prettifyExn(exn)
+                        });
+                    var shouldSwitch = nextSource !== source;
+                    if (shouldSwitch) {
+                      Logging.childInfo(logger, {
+                            msg: "Switching to another data-source",
+                            source: nextSource.name
+                          });
+                      sourceRef = nextSource;
+                      shouldUpdateActiveSource = true;
+                    } else {
+                      await Utils.delay(backoffMillis < 60000 ? backoffMillis : 60000);
+                    }
+                    retryRef = retryRef + 1 | 0;
+                    break;
+                case "ImpossibleForTheQuery" :
+                    var nextSource$1 = getNextSyncSource(sourceManager, initialSource, source, true);
+                    var hasAnotherSource = nextSource$1 !== initialSource;
+                    Logging.childWarn(logger, {
+                          msg: match$1.message + (
+                            hasAnotherSource ? " - Attempting to another source" : ""
+                          ),
+                          toBlock: attemptedToBlock,
+                          err: Utils.prettifyExn(exn)
+                        });
+                    if (hasAnotherSource) {
+                      sourceRef = nextSource$1;
+                      shouldUpdateActiveSource = false;
+                      retryRef = 0;
+                    } else {
+                      ErrorHandling.mkLogAndRaise(logger, "The indexer doesn't have data-sources which can continue fetching. Please, check the error logs or reach out to the Envio team.", null);
+                    }
+                    break;
+
+              }
+              break;
+
+        }
+        if (exit === 1) {
+          var nextSource$2 = getNextSyncSource(sourceManager, initialSource, source, undefined);
           var notAlreadyDeleted = sourceManager.sources.delete(source);
           if (notAlreadyDeleted) {
-            var exit = 0;
             switch (error$1.TAG) {
               case "UnsupportedSelection" :
                   Logging.childError(logger, error$1.message);
                   break;
               case "FailedGettingFieldSelection" :
-
-
+                  Logging.childError(logger, {
+                        msg: error$1.message,
+                        err: Utils.prettifyExn(error$1.exn),
+                        blockNumber: error$1.blockNumber,
+                        logIndex: error$1.logIndex
+                      });
                   break;
               case "FailedGettingItems" :
                   break;

             }
-            if (exit === 1) {
-              Logging.childError(logger, {
-                    msg: error$1.message,
-                    err: Utils.prettifyExn(error$1.exn),
-                    blockNumber: error$1.blockNumber,
-                    logIndex: error$1.logIndex
-                  });
-            }
-
           }
-          if (nextSource$
+          if (nextSource$2 === source) {
             ErrorHandling.mkLogAndRaise(logger, "The indexer doesn't have data-sources which can continue fetching. Please, check the error logs or reach out to the Envio team.", null);
           } else {
             Logging.childInfo(logger, {
                   msg: "Switching to another data-source",
-                  source: nextSource$
+                  source: nextSource$2.name
                 });
-
+            sourceRef = nextSource$2;
+            shouldUpdateActiveSource = true;
             retryRef = 0;
           }
         }
+
       } else {
         ErrorHandling.mkLogAndRaise(logger, "Failed to fetch block Range", error);
       }
     }
   };
+  if (shouldUpdateActiveSource) {
+    sourceManager.activeSource = sourceRef;
+  }
   return responseRef;
 }
