envio 2.21.3 → 2.21.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/FetchState.res +62 -122
- package/src/Prometheus.res +18 -8
- package/src/sources/SourceManager.res +2 -2
- package/src/sources/SourceManager.resi +6 -2
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.21.3",
+  "version": "v2.21.4",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.21.3",
-    "envio-linux-arm64": "v2.21.3",
-    "envio-darwin-x64": "v2.21.3",
-    "envio-darwin-arm64": "v2.21.3"
+    "envio-linux-x64": "v2.21.4",
+    "envio-linux-arm64": "v2.21.4",
+    "envio-darwin-x64": "v2.21.4",
+    "envio-darwin-arm64": "v2.21.4"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/FetchState.res
CHANGED
@@ -44,8 +44,6 @@ type partition = {
   latestFetchedBlock: blockNumberAndTimestamp,
   selection: selection,
   addressesByContractName: dict<array<Address.t>>,
-  //Events ordered from latest to earliest
-  fetchedEventQueue: array<Internal.eventItem>,
 }
 
 type t = {
@@ -69,27 +67,21 @@ type t = {
   chainId: int,
   // Fields computed by updateInternal
   latestFullyFetchedBlock: blockNumberAndTimestamp,
-  queueSize: int,
   // How much blocks behind the head we should query
   // Added for the purpose of avoiding reorg handling
   blockLag: option<int>,
-}
-
-let shallowCopyPartition = (p: partition) => {
-  ...p,
-  fetchedEventQueue: p.fetchedEventQueue->Array.copy,
+  //Items ordered from latest to earliest
+  queue: array<Internal.eventItem>,
 }
 
 let copy = (fetchState: t) => {
-  let partitions = fetchState.partitions->Js.Array2.map(shallowCopyPartition)
   {
     maxAddrInPartition: fetchState.maxAddrInPartition,
-    partitions,
+    partitions: fetchState.partitions,
     endBlock: fetchState.endBlock,
     nextPartitionIndex: fetchState.nextPartitionIndex,
     isFetchingAtHead: fetchState.isFetchingAtHead,
     latestFullyFetchedBlock: fetchState.latestFullyFetchedBlock,
-    queueSize: fetchState.queueSize,
     normalSelection: fetchState.normalSelection,
     firstEventBlockNumber: fetchState.firstEventBlockNumber,
     chainId: fetchState.chainId,
@@ -97,6 +89,7 @@ let copy = (fetchState: t) => {
     indexingContracts: fetchState.indexingContracts,
     dcsToStore: fetchState.dcsToStore,
     blockLag: fetchState.blockLag,
+    queue: fetchState.queue->Array.copy,
   }
 }
 
@@ -171,7 +164,6 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
     status: {
       fetchingStateId: None,
     },
-    fetchedEventQueue: [],
     selection: target.selection,
     addressesByContractName: restAddresses,
     latestFetchedBlock,
@@ -188,7 +180,6 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
       },
       selection: target.selection,
       addressesByContractName: mergedAddresses,
-      fetchedEventQueue: mergeSortedEventList(p.fetchedEventQueue, target.fetchedEventQueue),
       latestFetchedBlock,
     },
     rest,
@@ -199,26 +190,6 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
   }
 }
 
-/**
-Updates a given partition with new latest block values and new fetched
-events.
-*/
-let addItemsToPartition = (
-  p: partition,
-  ~latestFetchedBlock,
-  //Events ordered latest to earliest
-  ~reversedNewItems: array<Internal.eventItem>,
-) => {
-  {
-    ...p,
-    status: {
-      fetchingStateId: None,
-    },
-    latestFetchedBlock,
-    fetchedEventQueue: Array.concat(reversedNewItems, p.fetchedEventQueue),
-  }
-}
-
 /* strategy for TUI synced status:
 * Firstly -> only update synced status after batch is processed (not on batch creation). But also set when a batch tries to be created and there is no batch
 *
@@ -254,28 +225,19 @@ let updateInternal = (
   fetchState: t,
   ~partitions=fetchState.partitions,
   ~nextPartitionIndex=fetchState.nextPartitionIndex,
-  ~firstEventBlockNumber=fetchState.firstEventBlockNumber,
   ~indexingContracts=fetchState.indexingContracts,
   ~dcsToStore=fetchState.dcsToStore,
   ~currentBlockHeight=?,
+  ~queue=fetchState.queue,
 ): t => {
   let firstPartition = partitions->Js.Array2.unsafe_get(0)
-
-  let queueSize = ref(0)
   let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
-
   for idx in 0 to partitions->Array.length - 1 {
     let p = partitions->Js.Array2.unsafe_get(idx)
-
-    let partitionQueueSize = p.fetchedEventQueue->Array.length
-
-    queueSize := queueSize.contents + partitionQueueSize
-
     if latestFullyFetchedBlock.contents.blockNumber > p.latestFetchedBlock.blockNumber {
       latestFullyFetchedBlock := p.latestFetchedBlock
     }
   }
-
   let latestFullyFetchedBlock = latestFullyFetchedBlock.contents
 
   let isFetchingAtHead = switch currentBlockHeight {
@@ -295,11 +257,12 @@ let updateInternal = (
     }
   }
 
+  let queueSize = queue->Array.length
   Prometheus.IndexingPartitions.set(
     ~partitionsCount=partitions->Array.length,
     ~chainId=fetchState.chainId,
   )
-  Prometheus.IndexingBufferSize.set(~bufferSize=queueSize.contents, ~chainId=fetchState.chainId)
+  Prometheus.IndexingBufferSize.set(~bufferSize=queueSize, ~chainId=fetchState.chainId)
   Prometheus.IndexingBufferBlockNumber.set(
     ~blockNumber=latestFullyFetchedBlock.blockNumber,
     ~chainId=fetchState.chainId,
@@ -312,14 +275,17 @@ let updateInternal = (
     normalSelection: fetchState.normalSelection,
     chainId: fetchState.chainId,
     nextPartitionIndex,
-    firstEventBlockNumber,
+    firstEventBlockNumber: switch queue->Utils.Array.last {
+    | Some(item) => Utils.Math.minOptInt(fetchState.firstEventBlockNumber, Some(item.blockNumber))
+    | None => fetchState.firstEventBlockNumber
+    },
     partitions,
     isFetchingAtHead,
     latestFullyFetchedBlock,
-    queueSize: queueSize.contents,
     indexingContracts,
     dcsToStore,
     blockLag: fetchState.blockLag,
+    queue,
   }
 }
 
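The firstEventBlockNumber bookkeeping that callers used to pass into updateInternal is now derived inside updateInternal itself: the last element of the latest-to-earliest queue is the earliest buffered item, and its block number is folded into the previously recorded minimum. Utils.Math.minOptInt is an internal helper whose body is not part of this diff; the following is a minimal sketch of the semantics the call above appears to rely on (an assumption, not the package's implementation):

// Assumed behaviour of Utils.Math.minOptInt: the smaller of two optional ints,
// falling back to whichever side is defined.
let minOptInt = (a: option<int>, b: option<int>) =>
  switch (a, b) {
  | (Some(x), Some(y)) => Some(Pervasives.min(x, y))
  | (Some(_), None) => a
  | (None, Some(_)) => b
  | (None, None) => None
  }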
@@ -454,7 +420,6 @@ let registerDynamicContracts = (
       },
       selection: fetchState.normalSelection,
       addressesByContractName,
-      fetchedEventQueue: [],
     },
   ]
 } else {
@@ -476,7 +441,6 @@ let registerDynamicContracts = (
       },
       selection: fetchState.normalSelection,
       addressesByContractName: pendingAddressesByContractName.contents,
-      fetchedEventQueue: [],
     })
 
     // I use for loops instead of forEach, so ReScript better inlines ref access
@@ -520,7 +484,6 @@ let registerDynamicContracts = (
       },
       selection: fetchState.normalSelection,
       addressesByContractName,
-      fetchedEventQueue: [],
     })
   })
 } else {
@@ -621,7 +584,13 @@ let handleQueryResult = (
   switch partitions->Array.getIndexBy(p => p.id === partitionId) {
   | Some(pIndex) =>
     let p = partitions->Js.Array2.unsafe_get(pIndex)
-    let updatedPartition =
+    let updatedPartition = {
+      ...p,
+      status: {
+        fetchingStateId: None,
+      },
+      latestFetchedBlock,
+    }
 
     switch query.target {
     | Head
@@ -663,11 +632,7 @@ let handleQueryResult = (
     fetchState->updateInternal(
       ~partitions,
       ~currentBlockHeight,
-      ~firstEventBlockNumber=switch reversedNewItems->Utils.Array.last {
-      | Some(newFirstItem) =>
-        Utils.Math.minOptInt(fetchState.firstEventBlockNumber, Some(newFirstItem.blockNumber))
-      | None => fetchState.firstEventBlockNumber
-      },
+      ~queue=mergeSortedEventList(reversedNewItems, fetchState.queue),
     )
   })
 
@@ -746,16 +711,9 @@ let isFullPartition = (p: partition, ~maxAddrInPartition) => {
 }
 
 let getNextQuery = (
-  {
-    partitions,
-    maxAddrInPartition,
-    endBlock,
-    latestFullyFetchedBlock,
-    indexingContracts,
-    blockLag,
-  }: t,
+  {queue, partitions, maxAddrInPartition, endBlock, indexingContracts, blockLag}: t,
   ~concurrencyLimit,
-  ~maxPartitionQueueSize,
+  ~targetBufferSize,
   ~currentBlockHeight,
   ~stateId,
 ) => {
@@ -840,22 +798,27 @@ let getNextQuery = (
     }
   }
 
-
-
-
-
-
+  // We want to limit the buffer size to targetBufferSize (usually 3 * batchSize)
+  // To make sure the processing always has some buffer
+  // and not increase the memory usage too much
+  // If a partition fetched further than 3 * batchSize,
+  // it should be skipped until the buffer is consumed
+  let maxQueryBlockNumber = {
+    let targetBlockIdx = queue->Array.length - targetBufferSize
+    if targetBlockIdx < 0 {
+      currentBlockHeight
+    } else {
+      switch queue->Array.get(targetBlockIdx) {
+      | Some(item) => Pervasives.min(item.blockNumber, currentBlockHeight) // Just in case check that we don't query beyond the current block
+      | None => currentBlockHeight
+      }
+    }
+  }
   let queries = []
 
-  let registerPartitionQuery = (p, ~checkQueueSize, ~mergeTarget=?) => {
+  let registerPartitionQuery = (p, ~mergeTarget=?) => {
     if (
-      p->checkIsFetchingPartition->not &&
-      p.latestFetchedBlock.blockNumber < currentBlockHeight &&
-      (checkQueueSize ? p.fetchedEventQueue->Array.length < maxPartitionQueueSize : true) && (
-        isWithinSyncRange
-          ? true
-          : !checkIsWithinSyncRange(~latestFetchedBlock=p.latestFetchedBlock, ~currentBlockHeight)
-      )
+      p->checkIsFetchingPartition->not && p.latestFetchedBlock.blockNumber < maxQueryBlockNumber
     ) {
       switch p->makePartitionQuery(
         ~indexingContracts,
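This hunk carries the main scheduling change of the release: instead of capping each partition's own fetchedEventQueue at maxPartitionQueueSize, getNextQuery derives a single maxQueryBlockNumber from the shared queue and skips any partition that has already fetched past it. Below is a standalone sketch of the cap, simplified to a plain array of block numbers instead of Internal.eventItem records (illustrative only, not the package's code):

// Sketch of the buffer cap, assuming `queue` holds block numbers ordered latest → earliest.
let maxQueryBlockNumber = (~queue: array<int>, ~targetBufferSize, ~currentBlockHeight) => {
  let targetBlockIdx = queue->Js.Array2.length - targetBufferSize
  if targetBlockIdx < 0 {
    // Buffer is not full yet: partitions may query up to the chain head
    currentBlockHeight
  } else {
    switch queue->Belt.Array.get(targetBlockIdx) {
    | Some(blockNumber) => Pervasives.min(blockNumber, currentBlockHeight)
    | None => currentBlockHeight
    }
  }
}

// Example: queue = [120, 110, 100, 90, 80] with targetBufferSize = 3 gives
// targetBlockIdx = 2, so the cap is block 100 and any partition already past
// block 100 is skipped until the processor drains the buffer.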
@@ -877,23 +840,16 @@ let getNextQuery = (
     }
   }
 
-  fullPartitions->Array.forEach(p => p->registerPartitionQuery
+  fullPartitions->Array.forEach(p => p->registerPartitionQuery)
 
   if areMergingPartitionsFetching.contents->not {
     switch mergingPartitions {
     | [] => ()
-    | [p] =>
-      // If there's only one non-full partition without merge target,
-      // check that it didn't exceed queue size
-      p->registerPartitionQuery(~checkQueueSize=true)
+    | [p] => p->registerPartitionQuery
     | _ =>
       switch (mostBehindMergingPartition.contents, mergingPartitionTarget.contents) {
-      | (Some(p), None) =>
-
-        // we still have partitions to merge, so don't check for the queue size here
-        p->registerPartitionQuery(~checkQueueSize=false)
-      | (Some(p), Some(mergeTarget)) =>
-        p->registerPartitionQuery(~checkQueueSize=false, ~mergeTarget)
+      | (Some(p), None) => p->registerPartitionQuery
+      | (Some(p), Some(mergeTarget)) => p->registerPartitionQuery(~mergeTarget)
       | (None, _) =>
         Js.Exn.raiseError("Unexpected case, should always have a most behind partition.")
       }
@@ -983,34 +939,27 @@ let qItemLt = (a, b) => {
   }
 }
 
-/**
-Returns queue item WITHOUT the updated fetch state. Used for checking values
-not updating state
-*/
-let getEarliestEventInPartition = (p: partition) => {
-  switch p.fetchedEventQueue->Utils.Array.last {
-  | Some(head) =>
-    Item({item: head, popItemOffQueue: () => p.fetchedEventQueue->Js.Array2.pop->ignore})
-  | None => makeNoItem(p)
-  }
-}
-
 /**
 Gets the earliest queueItem from thgetNodeEarliestEventWithUpdatedQueue.
 
 Finds the earliest queue item across all partitions and then returns that
 queue item with an update fetch state.
 */
-let getEarliestEvent = ({
-
-
-
-
-
-
+let getEarliestEvent = ({queue, latestFullyFetchedBlock}: t) => {
+  switch queue->Utils.Array.last {
+  | Some(item) =>
+    if item.blockNumber <= latestFullyFetchedBlock.blockNumber {
+      Item({item, popItemOffQueue: () => queue->Js.Array2.pop->ignore})
+    } else {
+      NoItem({
+        latestFetchedBlock: latestFullyFetchedBlock,
+      })
     }
+  | None =>
+    NoItem({
+      latestFetchedBlock: latestFullyFetchedBlock,
+    })
   }
-  item.contents
 }
 
 /**
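With the per-partition queues gone, getEarliestEventInPartition is removed and getEarliestEvent reads the tail of the single queue, handing an item out only once every partition has fetched up to its block and returning NoItem otherwise. A hypothetical consumer, assuming getEarliestEvent and the Item/NoItem constructors are exposed from the FetchState module exactly as they appear above (not code from the package):

// Illustrative only: pop the next safely processable item, if any.
let popNext = (fetchState: FetchState.t) =>
  switch fetchState->FetchState.getEarliestEvent {
  | FetchState.Item({item, popItemOffQueue}) =>
    // Items are surfaced only once their block is fully fetched across all partitions
    popItemOffQueue()
    Some(item)
  | FetchState.NoItem(_) => None
  }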
@@ -1028,8 +977,7 @@ let make = (
 ): t => {
   let latestFetchedBlock = {
     blockTimestamp: 0,
-
-    blockNumber: Pervasives.max(startBlock - 1, 0),
+    blockNumber: startBlock - 1,
   }
 
   let notDependingOnAddresses = []
@@ -1071,7 +1019,6 @@ let make = (
       eventConfigs: notDependingOnAddresses,
     },
     addressesByContractName: Js.Dict.empty(),
-    fetchedEventQueue: [],
   })
 }
 
@@ -1092,7 +1039,6 @@ let make = (
     latestFetchedBlock,
     selection: normalSelection,
     addressesByContractName: Js.Dict.empty(),
-    fetchedEventQueue: [],
   }
 }
 
@@ -1175,16 +1121,16 @@ let make = (
     chainId,
     endBlock,
     latestFullyFetchedBlock: latestFetchedBlock,
-    queueSize: 0,
     firstEventBlockNumber: None,
     normalSelection,
     indexingContracts,
     dcsToStore: None,
     blockLag,
+    queue: [],
   }
 }
 
-let queueSize = ({
+let queueSize = ({queue}: t) => queue->Array.length
 
 /**
 * Returns the latest block number fetched for the lowest fetcher queue (ie the earliest un-fetched dynamic contract)
@@ -1231,12 +1177,6 @@ let rollbackPartition = (
   } else {
     let shouldRollbackFetched = p.latestFetchedBlock.blockNumber >= firstChangeEvent.blockNumber
 
-    let fetchedEventQueue = if shouldRollbackFetched {
-      p.fetchedEventQueue->pruneQueueFromFirstChangeEvent(~firstChangeEvent)
-    } else {
-      p.fetchedEventQueue
-    }
-
     Some({
       id: p.id,
       selection: p.selection,
@@ -1244,10 +1184,9 @@ let rollbackPartition = (
         fetchingStateId: None,
       },
       addressesByContractName: rollbackedAddressesByContractName,
-      fetchedEventQueue,
       latestFetchedBlock: shouldRollbackFetched
         ? {
-            blockNumber:
+            blockNumber: firstChangeEvent.blockNumber - 1,
             blockTimestamp: 0,
           }
         : p.latestFetchedBlock,
@@ -1289,6 +1228,7 @@ let rollback = (fetchState: t, ~firstChangeEvent) => {
   fetchState->updateInternal(
     ~partitions,
     ~indexingContracts,
+    ~queue=fetchState.queue->pruneQueueFromFirstChangeEvent(~firstChangeEvent),
     ~dcsToStore=switch fetchState.dcsToStore {
     | Some(dcsToStore) =>
       let filtered =
package/src/Prometheus.res
CHANGED
@@ -400,15 +400,14 @@ module IndexingBufferSize = {
   }
 }
 
-module
-  let gauge =
-
-
-
-)
+module IndexingTargetBufferSize = {
+  let gauge = PromClient.Gauge.makeGauge({
+    "name": "envio_indexing_target_buffer_size",
+    "help": "The target buffer size per chain for indexing. The actual number of items in the queue may exceed this value, but the indexer always tries to keep the buffer filled up to this target.",
+  })
 
-  let set = (~
-  gauge->
+  let set = (~targetBufferSize) => {
+    gauge->PromClient.Gauge.set(targetBufferSize)
   }
 }
 
@@ -545,3 +544,14 @@ module RollbackTargetBlockNumber = {
     gauge->SafeGauge.handleInt(~labels=chain->ChainMap.Chain.toChainId, ~value=blockNumber)
   }
 }
+
+module ProcessingMaxBatchSize = {
+  let gauge = PromClient.Gauge.makeGauge({
+    "name": "envio_processing_max_batch_size",
+    "help": "The maximum number of items to process in a single batch.",
+  })
+
+  let set = (~maxBatchSize) => {
+    gauge->PromClient.Gauge.set(maxBatchSize)
+  }
+}
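Both new gauges are global, with no per-chain label (unlike IndexingBufferSize above), so they are set with plain ints. A hedged sketch of how they might be wired up, with batchSize standing in for whatever batch size the indexer is configured with; the actual call sites are outside this diff:

// Hypothetical wiring; `batchSize` is an assumed configuration value.
let batchSize = 5000
Prometheus.ProcessingMaxBatchSize.set(~maxBatchSize=batchSize)
// The FetchState comments describe the target buffer as "usually 3 * batchSize".
Prometheus.IndexingTargetBufferSize.set(~targetBufferSize=3 * batchSize)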
package/src/sources/SourceManager.res
CHANGED
@@ -96,7 +96,7 @@ let fetchNext = async (
   ~executeQuery,
   ~waitForNewBlock,
   ~onNewBlock,
-  ~maxPartitionQueueSize,
+  ~targetBufferSize,
   ~stateId,
 ) => {
   let {maxPartitionConcurrency} = sourceManager
@@ -105,7 +105,7 @@ let fetchNext = async (
     ~concurrencyLimit={
       maxPartitionConcurrency - sourceManager.fetchingPartitionsCount
     },
-    ~maxPartitionQueueSize,
+    ~targetBufferSize,
     ~currentBlockHeight,
     ~stateId,
   ) {
package/src/sources/SourceManager.resi
CHANGED
@@ -17,7 +17,7 @@ let fetchNext: (
   ~executeQuery: FetchState.query => promise<unit>,
   ~waitForNewBlock: (~currentBlockHeight: int) => promise<int>,
   ~onNewBlock: (~currentBlockHeight: int) => unit,
-  ~maxPartitionQueueSize: int,
+  ~targetBufferSize: int,
   ~stateId: int,
 ) => promise<unit>
 
@@ -29,4 +29,8 @@ let executeQuery: (
   ~currentBlockHeight: int,
 ) => promise<Source.blockRangeFetchResponse>
 
-let makeGetHeightRetryInterval: (
+let makeGetHeightRetryInterval: (
+  ~initialRetryInterval: int,
+  ~backoffMultiplicative: int,
+  ~maxRetryInterval: int,
+) => (~retry: int) => int
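The interface now spells out the curried shape of makeGetHeightRetryInterval: the three backoff settings produce a function from the retry count to a wait interval. A minimal sketch of an implementation satisfying this signature (exponential backoff capped at maxRetryInterval); the real body lives in SourceManager.res and is not shown in this diff:

// Illustrative implementation of the published signature, not the package's code.
let makeGetHeightRetryInterval = (
  ~initialRetryInterval: int,
  ~backoffMultiplicative: int,
  ~maxRetryInterval: int,
) =>
  (~retry: int) =>
    Pervasives.min(
      initialRetryInterval * Js.Math.pow_int(~base=backoffMultiplicative, ~exp=retry),
      maxRetryInterval,
    )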