envio 2.29.2 → 2.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/evm.schema.json +18 -0
- package/package.json +5 -5
- package/src/Address.res +23 -0
- package/src/Address.res.js +14 -0
- package/src/Batch.res +103 -90
- package/src/Batch.res.js +81 -101
- package/src/FetchState.res +73 -129
- package/src/FetchState.res.js +87 -149
- package/src/Hasura.res +178 -124
- package/src/Hasura.res.js +115 -54
- package/src/Persistence.res +1 -13
- package/src/Persistence.res.js +1 -7
- package/src/PgStorage.res +0 -7
- package/src/PgStorage.res.js +1 -5
- package/src/Utils.res +10 -0
- package/src/Utils.res.js +5 -0
- package/src/bindings/Ethers.res +35 -11
- package/src/bindings/Ethers.res.js +21 -1
- package/src/bindings/PromClient.res +10 -0
- package/src/db/InternalTable.res +1 -59
- package/src/db/InternalTable.res.js +2 -34
- package/src/sources/HyperSyncClient.res +8 -2
- package/src/sources/HyperSyncClient.res.js +3 -2
- package/src/sources/HyperSyncSource.res +8 -1
- package/src/sources/HyperSyncSource.res.js +7 -2
- package/src/sources/RpcSource.res +153 -3
- package/src/sources/RpcSource.res.js +195 -73
package/src/FetchState.res
CHANGED
@@ -61,7 +61,7 @@ type t = {
   contractConfigs: dict<contractConfig>,
   // Registered dynamic contracts that need to be stored in the db
   // Should read them at the same time when getting items for the batch
-  dcsToStore:
+  dcsToStore: array<indexingContract>,
   // Not used for logic - only metadata
   chainId: int,
   // The block number of the latest block fetched
@@ -75,35 +75,14 @@ type t = {
   // How much blocks behind the head we should query
   // Needed to query before entering reorg threshold
   blockLag: int,
-  //
-
+  // Buffer of items ordered from earliest to latest
+  buffer: array<Internal.item>,
   // How many items we should aim to have in the buffer
   // ready for processing
   targetBufferSize: int,
   onBlockConfigs: array<Internal.onBlockConfig>,
 }

-let copy = (fetchState: t) => {
-  {
-    maxAddrInPartition: fetchState.maxAddrInPartition,
-    partitions: fetchState.partitions,
-    startBlock: fetchState.startBlock,
-    endBlock: fetchState.endBlock,
-    nextPartitionIndex: fetchState.nextPartitionIndex,
-    latestFullyFetchedBlock: fetchState.latestFullyFetchedBlock,
-    latestOnBlockBlockNumber: fetchState.latestOnBlockBlockNumber,
-    normalSelection: fetchState.normalSelection,
-    chainId: fetchState.chainId,
-    contractConfigs: fetchState.contractConfigs,
-    indexingContracts: fetchState.indexingContracts,
-    dcsToStore: fetchState.dcsToStore,
-    blockLag: fetchState.blockLag,
-    queue: fetchState.queue->Array.copy,
-    onBlockConfigs: fetchState.onBlockConfigs,
-    targetBufferSize: fetchState.targetBufferSize,
-  }
-}
-
 let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
   switch (p, target) {
   | ({selection: {dependsOnAddresses: true}}, {selection: {dependsOnAddresses: true}}) => {
@@ -206,9 +185,9 @@ let bufferBlock = ({latestFullyFetchedBlock, latestOnBlockBlockNumber}: t) => {
 Comparitor for two events from the same chain. No need for chain id or timestamp
 */
 let compareBufferItem = (a: Internal.item, b: Internal.item) => {
-  let blockDiff =
+  let blockDiff = a->Internal.getItemBlockNumber - b->Internal.getItemBlockNumber
   if blockDiff === 0 {
-
+    a->Internal.getItemLogIndex - b->Internal.getItemLogIndex
   } else {
     blockDiff
   }
@@ -245,7 +224,7 @@ let updateInternal = (
   | [] => latestFullyFetchedBlock.blockNumber
   | onBlockConfigs => {
     // Calculate the max block number we are going to create items for
-    // Use
+    // Use targetBufferSize to get the last target item in the buffer
     //
     // mutItems is not very reliable, since it might not be sorted,
     // but the chances for it happen are very low and not critical
@@ -253,15 +232,15 @@ let updateInternal = (
     // All this needed to prevent OOM when adding too many block items to the queue
     let maxBlockNumber = switch switch mutItemsRef.contents {
     | Some(mutItems) => mutItems
-    | None => fetchState.
-    }->
+    | None => fetchState.buffer
+    }->Belt.Array.get(fetchState.targetBufferSize - 1) {
     | Some(item) => item->Internal.getItemBlockNumber
     | None => latestFullyFetchedBlock.blockNumber
     }

     let mutItems = switch mutItemsRef.contents {
     | Some(mutItems) => mutItems
-    | None => fetchState.
+    | None => fetchState.buffer->Array.copy
     }
     mutItemsRef := Some(mutItems)

@@ -327,12 +306,12 @@ let updateInternal = (
     indexingContracts,
     dcsToStore,
     blockLag,
-
+    buffer: switch mutItemsRef.contents {
     // Theoretically it could be faster to asume that
     // the items are sorted, but there are cases
     // when the data source returns them unsorted
     | Some(mutItems) => mutItems->Js.Array2.sortInPlaceWith(compareBufferItem)
-    | None => fetchState.
+    | None => fetchState.buffer
     },
   }

@@ -341,7 +320,7 @@ let updateInternal = (
     ~chainId=fetchState.chainId,
   )
   Prometheus.IndexingBufferSize.set(
-    ~bufferSize=updatedFetchState.
+    ~bufferSize=updatedFetchState.buffer->Array.length,
     ~chainId=fetchState.chainId,
   )
   Prometheus.IndexingBufferBlockNumber.set(
@@ -590,8 +569,8 @@ let registerDynamicContracts = (
   fetchState->updateInternal(
     ~partitions=fetchState.partitions->Js.Array2.concat(newPartitions),
     ~dcsToStore=switch fetchState.dcsToStore {
-
-
+    | [] => dcsToStore
+    | existingDcs => Array.concat(existingDcs, dcsToStore)
     },
     ~indexingContracts=// We don't need registeringContracts anymore,
     // so we can safely mixin indexingContracts in it
@@ -694,7 +673,7 @@ let handleQueryResult = (
     ~mutItems=?{
       switch newItems {
       | [] => None
-      | _ => Some(fetchState.
+      | _ => Some(fetchState.buffer->Array.concat(newItems))
       }
     },
   )
@@ -776,7 +755,7 @@ let isFullPartition = (p: partition, ~maxAddrInPartition) => {

 let getNextQuery = (
   {
-
+    buffer,
     partitions,
     targetBufferSize,
     maxAddrInPartition,
@@ -874,14 +853,11 @@ let getNextQuery = (
   // If a partition fetched further than 3 * batchSize,
   // it should be skipped until the buffer is consumed
   let maxQueryBlockNumber = {
-
-
-
-
-
-    | Some(item) => Pervasives.min(item->Internal.getItemBlockNumber, currentBlockHeight) // Just in case check that we don't query beyond the current block
-    | None => currentBlockHeight
-    }
+    switch buffer->Array.get(targetBufferSize - 1) {
+    | Some(item) =>
+      // Just in case check that we don't query beyond the current block
+      Pervasives.min(item->Internal.getItemBlockNumber, currentBlockHeight)
+    | None => currentBlockHeight
     }
   }
   let queries = []
@@ -953,45 +929,43 @@ let getNextQuery = (
   }
 }

-
-
-
-
-
-
-
-
-  | NoItem({latestFetchedBlock: blockNumberAndTimestamp})
-
-/**
-Simple constructor for no item from partition
-*/
-let makeNoItem = ({latestFetchedBlock}: partition) => NoItem({
-  latestFetchedBlock: latestFetchedBlock,
-})
+let getTimestampAt = (fetchState: t, ~index) => {
+  switch fetchState.buffer->Belt.Array.get(index) {
+  | Some(Event({timestamp})) => timestamp
+  | Some(Block(_)) =>
+    Js.Exn.raiseError("Block handlers are not supported for ordered multichain mode.")
+  | None => (fetchState->bufferBlock).blockTimestamp
+  }
+}

-
-
+let hasReadyItem = ({buffer} as fetchState: t) => {
+  switch buffer->Belt.Array.get(0) {
+  | Some(item) => item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber
+  | None => false
+  }
+}

-
-
-
-let
-
-
-
-
-
-
-
-
-
+let getReadyItemsCount = (fetchState: t, ~targetSize: int, ~fromItem) => {
+  let readyBlockNumber = ref(fetchState->bufferBlockNumber)
+  let acc = ref(0)
+  let isFinished = ref(false)
+  while !isFinished.contents {
+    switch fetchState.buffer->Belt.Array.get(fromItem + acc.contents) {
+    | Some(item) =>
+      let itemBlockNumber = item->Internal.getItemBlockNumber
+      if itemBlockNumber <= readyBlockNumber.contents {
+        acc := acc.contents + 1
+        if acc.contents === targetSize {
+          // Should finish accumulating items from the same block
+          readyBlockNumber := itemBlockNumber
+        }
+      } else {
+        isFinished := true
+      }
+    | None => isFinished := true
     }
-  | None =>
-    NoItem({
-      latestFetchedBlock: fetchState->bufferBlock,
-    })
   }
+  acc.contents
 }

 /**
@@ -1130,21 +1104,21 @@ let make = (
     latestOnBlockBlockNumber: progressBlockNumber,
     normalSelection,
     indexingContracts,
-    dcsToStore:
+    dcsToStore: [],
     blockLag,
     onBlockConfigs,
     targetBufferSize,
-
+    buffer: [],
   }
 }

-let bufferSize = ({
+let bufferSize = ({buffer}: t) => buffer->Array.length

 let pruneQueueFromFirstChangeEvent = (
-
+  buffer: array<Internal.item>,
   ~firstChangeEvent: blockNumberAndLogIndex,
 ) => {
-
+  buffer->Array.keep(item =>
     switch item {
     | Event({blockNumber, logIndex})
     | Block({blockNumber, logIndex}) => (blockNumber, logIndex)
@@ -1239,16 +1213,11 @@ let rollback = (fetchState: t, ~firstChangeEvent) => {
   }->updateInternal(
     ~partitions,
     ~indexingContracts,
-    ~mutItems=fetchState.
+    ~mutItems=fetchState.buffer->pruneQueueFromFirstChangeEvent(~firstChangeEvent),
     ~dcsToStore=switch fetchState.dcsToStore {
-
-
-
-      switch filtered {
-      | [] => None
-      | _ => Some(filtered)
-      }
-    | None => None
+    | [] as empty => empty
+    | dcsToStore =>
+      dcsToStore->Js.Array2.filter(dc => !(addressesToRemove->Utils.Set.has(dc.address)))
     },
   )
 }
@@ -1271,7 +1240,7 @@ let isActivelyIndexing = ({endBlock} as fetchState: t) => {
 }

 let isReadyToEnterReorgThreshold = (
-  {endBlock, blockLag,
+  {endBlock, blockLag, buffer} as fetchState: t,
   ~currentBlockHeight,
 ) => {
   let bufferBlockNumber = fetchState->bufferBlockNumber
@@ -1280,42 +1249,26 @@ let isReadyToEnterReorgThreshold = (
   | Some(endBlock) if bufferBlockNumber >= endBlock => true
   | _ => bufferBlockNumber >= currentBlockHeight - blockLag
   } &&
-
+  buffer->Utils.Array.isEmpty
 }

 let filterAndSortForUnorderedBatch = {
-  let
-  switch
+  let hasFullBatch = ({buffer} as fetchState: t, ~batchSizeTarget) => {
+    switch buffer->Belt.Array.get(batchSizeTarget - 1) {
     | Some(item) => item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber
     | None => false
     }
   }

-
-    // Queue is ordered from latest to earliest, so the earliest eligible
-    // item for a full batch of size B is at index (length - B).
-    // Do NOT subtract an extra 1 here; when length === B we should still
-    // classify the queue as full and probe index 0.
-    let targetBlockIdx = queue->Array.length - maxBatchSize
-    if targetBlockIdx < 0 {
-      false
-    } else {
-      // Unsafe can fail when maxBatchSize is 0,
-      // but we ignore the case
-      queue->Js.Array2.unsafe_get(targetBlockIdx)->Internal.getItemBlockNumber <=
-        fetchState->bufferBlockNumber
-    }
-  }
-
-  (fetchStates: array<t>, ~maxBatchSize: int) => {
+  (fetchStates: array<t>, ~batchSizeTarget: int) => {
     fetchStates
-    ->Array.keepU(
+    ->Array.keepU(hasReadyItem)
     ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
-      switch (a->hasFullBatch(~
+      switch (a->hasFullBatch(~batchSizeTarget), b->hasFullBatch(~batchSizeTarget)) {
      | (true, true)
      | (false, false) =>
        // Use unsafe since we filtered out all queues without batch items
-        switch (a.
+        switch (a.buffer->Belt.Array.getUnsafe(0), b.buffer->Belt.Array.getUnsafe(0)) {
        | (Event({timestamp: aTimestamp}), Event({timestamp: bTimestamp})) =>
          aTimestamp - bTimestamp
        | (Block(_), _)
@@ -1331,20 +1284,11 @@ let filterAndSortForUnorderedBatch = {
   }
 }

-  let getProgressBlockNumber = ({
+let getProgressBlockNumber = ({buffer} as fetchState: t) => {
   let bufferBlockNumber = fetchState->bufferBlockNumber
-  switch
+  switch buffer->Belt.Array.get(0) {
   | Some(item) if bufferBlockNumber >= item->Internal.getItemBlockNumber =>
     item->Internal.getItemBlockNumber - 1
   | _ => bufferBlockNumber
   }
 }
-
-let getProgressNextBlockLogIndex = ({queue} as fetchState: t) => {
-  switch queue->Utils.Array.last {
-  | Some(Event({logIndex, blockNumber}))
-    if fetchState->bufferBlockNumber >= blockNumber && logIndex > 0 =>
-    Some(logIndex - 1)
-  | _ => None
-  }
-}