envio 2.29.0-alpha.1 → 2.29.0-alpha.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +7 -5
- package/src/Batch.res +12 -0
- package/src/Batch.res.js +15 -0
- package/src/Envio.gen.ts +7 -3
- package/src/Envio.res +5 -8
- package/src/EventRegister.res +51 -15
- package/src/EventRegister.res.js +34 -9
- package/src/EventRegister.resi +1 -1
- package/src/EventUtils.res +4 -0
- package/src/EventUtils.res.js +3 -0
- package/src/FetchState.res +195 -145
- package/src/FetchState.res.js +218 -121
- package/src/Internal.res +3 -0
- package/src/Logging.res.js +3 -4
- package/src/Utils.res +2 -0
- package/src/bindings/Pino.res +1 -0
- package/src/bindings/Pino.res.js +10 -3
- package/src/db/EntityHistory.res +0 -3
- package/src/db/EntityHistory.res.js +30 -33
- package/src/db/InternalTable.res +0 -6
- package/src/db/InternalTable.res.js +0 -1
- package/src/sources/SourceManager.res +0 -2
- package/src/sources/SourceManager.res.js +2 -2
- package/src/sources/SourceManager.resi +0 -1
package/src/FetchState.res
CHANGED
@@ -51,7 +51,7 @@ type t = {
   // Used for the incremental partition id. Can't use the partitions length,
   // since partitions might be deleted on merge or cleaned up
   nextPartitionIndex: int,
-
+  startBlock: int,
   endBlock: option<int>,
   maxAddrInPartition: int,
   normalSelection: selection,
@@ -64,24 +64,34 @@ type t = {
   dcsToStore: option<array<indexingContract>>,
   // Not used for logic - only metadata
   chainId: int,
-  //
+  // The block number of the latest block fetched
+  // which added all its events to the queue
   latestFullyFetchedBlock: blockNumberAndTimestamp,
+  // The block number of the latest block which was added to the queue
+  // by the onBlock configs
+  // Need a separate pointer for this
+  // to prevent OOM when adding too many items to the queue
+  latestOnBlockBlockNumber: int,
   // How much blocks behind the head we should query
   // Needed to query before entering reorg threshold
   blockLag: int,
   //Items ordered from latest to earliest
   queue: array<Internal.item>,
-
+  // How many items we should aim to have in the buffer
+  // ready for processing
+  targetBufferSize: int,
+  onBlockConfigs: array<Internal.onBlockConfig>,
 }
 
 let copy = (fetchState: t) => {
   {
     maxAddrInPartition: fetchState.maxAddrInPartition,
     partitions: fetchState.partitions,
+    startBlock: fetchState.startBlock,
     endBlock: fetchState.endBlock,
     nextPartitionIndex: fetchState.nextPartitionIndex,
-    isFetchingAtHead: fetchState.isFetchingAtHead,
     latestFullyFetchedBlock: fetchState.latestFullyFetchedBlock,
+    latestOnBlockBlockNumber: fetchState.latestOnBlockBlockNumber,
     normalSelection: fetchState.normalSelection,
     chainId: fetchState.chainId,
     contractConfigs: fetchState.contractConfigs,
@@ -90,6 +100,7 @@ let copy = (fetchState: t) => {
     blockLag: fetchState.blockLag,
     queue: fetchState.queue->Array.copy,
     onBlockConfigs: fetchState.onBlockConfigs,
+    targetBufferSize: fetchState.targetBufferSize,
   }
 }
 
@@ -171,33 +182,40 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
   }
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-let
-
-
+@inline
+let bufferBlockNumber = ({latestFullyFetchedBlock, latestOnBlockBlockNumber}: t) => {
+  latestOnBlockBlockNumber < latestFullyFetchedBlock.blockNumber
+    ? latestOnBlockBlockNumber
+    : latestFullyFetchedBlock.blockNumber
+}
+
+/**
+ * Returns the latest block which is ready to be consumed
+ */
+@inline
+let bufferBlock = ({latestFullyFetchedBlock, latestOnBlockBlockNumber}: t) => {
+  latestOnBlockBlockNumber < latestFullyFetchedBlock.blockNumber
+    ? {
+        blockNumber: latestOnBlockBlockNumber,
+        blockTimestamp: 0,
+      }
+    : latestFullyFetchedBlock
+}
+
+/*
+Comparitor for two events from the same chain. No need for chain id or timestamp
+*/
+let compareBufferItem = (a: Internal.item, b: Internal.item) => {
+  let blockDiff = b->Internal.getItemBlockNumber - a->Internal.getItemBlockNumber
+  if blockDiff === 0 {
+    b->Internal.getItemLogIndex - a->Internal.getItemLogIndex
+  } else {
+    blockDiff
+  }
+}
+
+// Some big number which should be bigger than any log index
+let blockItemLogIndex = 16777216
 
 /*
 Update fetchState, merge registers and recompute derived values
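The new helpers pin down the consumable end of the buffer: onBlock items only exist up to latestOnBlockBlockNumber, so the effective buffer block is the minimum of that pointer and the latest fully fetched block, while the comparator keeps the queue sorted from latest to earliest so the earliest item always sits at the end. A rough TypeScript sketch of the same logic; the type and function names here are illustrative, not part of the package:

```ts
type BlockRef = { blockNumber: number; blockTimestamp: number };
type Item = { blockNumber: number; logIndex: number };

// The buffer is only consumable up to the block that both event fetching
// and onBlock item generation have fully covered.
const bufferBlockNumber = (latestFullyFetched: BlockRef, latestOnBlock: number): number =>
  Math.min(latestOnBlock, latestFullyFetched.blockNumber);

// Sort descending by block number, then by log index, so the queue stays
// ordered from latest to earliest and the earliest item ends up last.
const compareBufferItem = (a: Item, b: Item): number =>
  b.blockNumber - a.blockNumber || b.logIndex - a.logIndex;

// After sorting, the earliest item is queue[queue.length - 1].
const queue: Item[] = [
  { blockNumber: 12, logIndex: 3 },
  { blockNumber: 15, logIndex: 0 },
  { blockNumber: 12, logIndex: 1 },
].sort(compareBufferItem); // => block 15, block 12 (log 3), block 12 (log 1)

console.log(bufferBlockNumber({ blockNumber: 20, blockTimestamp: 0 }, 18)); // 18
```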
@@ -208,8 +226,7 @@ let updateInternal = (
   ~nextPartitionIndex=fetchState.nextPartitionIndex,
   ~indexingContracts=fetchState.indexingContracts,
   ~dcsToStore=fetchState.dcsToStore,
-  ~
-  ~queue=fetchState.queue,
+  ~mutItems=?,
   ~blockLag=fetchState.blockLag,
 ): t => {
   let firstPartition = partitions->Js.Array2.unsafe_get(0)
@@ -222,50 +239,117 @@ let updateInternal = (
   }
   let latestFullyFetchedBlock = latestFullyFetchedBlock.contents
 
-  let
-
-
-
-
-
-
-  //
-
-
-
-
-
-
+  let mutItemsRef = ref(mutItems)
+
+  let latestOnBlockBlockNumber = switch fetchState.onBlockConfigs {
+  | [] => latestFullyFetchedBlock.blockNumber
+  | onBlockConfigs => {
+      // Calculate the max block number we are going to create items for
+      // Use -targetBufferSize to get the last target item in the queue (which is reversed)
+      //
+      // mutItems is not very reliable, since it might not be sorted,
+      // but the chances for it happen are very low and not critical
+      //
+      // All this needed to prevent OOM when adding too many block items to the queue
+      let maxBlockNumber = switch switch mutItemsRef.contents {
+      | Some(mutItems) => mutItems
+      | None => fetchState.queue
+      }->Utils.Array.at(-fetchState.targetBufferSize) {
+      | Some(item) => item->Internal.getItemBlockNumber
+      | None => latestFullyFetchedBlock.blockNumber
+      }
+
+      let mutItems = switch mutItemsRef.contents {
+      | Some(mutItems) => mutItems
+      | None => fetchState.queue->Array.copy
+      }
+      mutItemsRef := Some(mutItems)
+
+      let newItemsCounter = ref(0)
+      let latestOnBlockBlockNumber = ref(fetchState.latestOnBlockBlockNumber)
+
+      // Simply iterate over every block
+      // could have a better algorithm to iterate over blocks in a more efficient way
+      // but raw loops are fast enough
+      while (
+        latestOnBlockBlockNumber.contents < maxBlockNumber &&
+        // Additional safeguard to prevent OOM
+        newItemsCounter.contents <= fetchState.targetBufferSize
+      ) {
+        let blockNumber = latestOnBlockBlockNumber.contents + 1
+        latestOnBlockBlockNumber := blockNumber
+
+        for configIdx in 0 to onBlockConfigs->Array.length - 1 {
+          let onBlockConfig = onBlockConfigs->Js.Array2.unsafe_get(configIdx)
+
+          let handlerStartBlock = switch onBlockConfig.startBlock {
+          | Some(startBlock) => startBlock
+          | None => fetchState.startBlock
+          }
+
+          if (
+            blockNumber >= handlerStartBlock &&
+            switch onBlockConfig.endBlock {
+            | Some(endBlock) => blockNumber <= endBlock
+            | None => true
+            } &&
+            (blockNumber - handlerStartBlock)->Pervasives.mod(onBlockConfig.interval) === 0
+          ) {
+            mutItems->Array.push(
+              Block({
+                onBlockConfig,
+                blockNumber,
+                logIndex: blockItemLogIndex + onBlockConfig.index,
+              }),
+            )
+            newItemsCounter := newItemsCounter.contents + 1
+          }
+        }
+      }
+
+      latestOnBlockBlockNumber.contents
     }
   }
 
-  let
-  Prometheus.IndexingPartitions.set(
-    ~partitionsCount=partitions->Array.length,
-    ~chainId=fetchState.chainId,
-  )
-  Prometheus.IndexingBufferSize.set(~bufferSize, ~chainId=fetchState.chainId)
-  Prometheus.IndexingBufferBlockNumber.set(
-    ~blockNumber=latestFullyFetchedBlock.blockNumber,
-    ~chainId=fetchState.chainId,
-  )
-
-  {
+  let updatedFetchState = {
     maxAddrInPartition: fetchState.maxAddrInPartition,
+    startBlock: fetchState.startBlock,
     endBlock: fetchState.endBlock,
     contractConfigs: fetchState.contractConfigs,
     normalSelection: fetchState.normalSelection,
     chainId: fetchState.chainId,
     onBlockConfigs: fetchState.onBlockConfigs,
+    targetBufferSize: fetchState.targetBufferSize,
     nextPartitionIndex,
     partitions,
-
+    latestOnBlockBlockNumber,
    latestFullyFetchedBlock,
    indexingContracts,
    dcsToStore,
    blockLag,
-    queue
+    queue: switch mutItemsRef.contents {
+    // Theoretically it could be faster to asume that
+    // the items are sorted, but there are cases
+    // when the data source returns them unsorted
+    | Some(mutItems) => mutItems->Js.Array2.sortInPlaceWith(compareBufferItem)
+    | None => fetchState.queue
+    },
   }
+
+  Prometheus.IndexingPartitions.set(
+    ~partitionsCount=partitions->Array.length,
+    ~chainId=fetchState.chainId,
+  )
+  Prometheus.IndexingBufferSize.set(
+    ~bufferSize=updatedFetchState.queue->Array.length,
+    ~chainId=fetchState.chainId,
+  )
+  Prometheus.IndexingBufferBlockNumber.set(
+    ~blockNumber=updatedFetchState->bufferBlockNumber,
+    ~chainId=fetchState.chainId,
+  )
+
+  updatedFetchState
 }
 
 let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length
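updateInternal now generates Block items itself: it walks block numbers one at a time from the previous latestOnBlockBlockNumber and, for each onBlock config, pushes an item when the block is inside the config's start/end window and lands on its interval, capping the work with targetBufferSize (via the max block looked up in the queue and via the new-item counter) to avoid OOM. A hedged TypeScript sketch of just the emission rule: the config field names follow the diff, while shouldEmit and chainStartBlock are illustrative:

```ts
interface OnBlockConfig {
  index: number;       // offset added to the synthetic log index
  interval: number;    // emit an item every `interval` blocks
  startBlock?: number; // falls back to the chain's start block
  endBlock?: number;   // open-ended when absent
}

const BLOCK_ITEM_LOG_INDEX = 16_777_216; // larger than any real log index

// Should a Block item be emitted for `blockNumber` under `config`?
function shouldEmit(config: OnBlockConfig, blockNumber: number, chainStartBlock: number): boolean {
  const start = config.startBlock ?? chainStartBlock;
  return (
    blockNumber >= start &&
    (config.endBlock === undefined || blockNumber <= config.endBlock) &&
    (blockNumber - start) % config.interval === 0
  );
}

// interval 10 from block 100 fires at 100, 110, 120, ...
console.log(shouldEmit({ index: 0, interval: 10 }, 120, 100)); // true
console.log(shouldEmit({ index: 0, interval: 10 }, 125, 100)); // false
// The emitted item gets logIndex = BLOCK_ITEM_LOG_INDEX + config.index, so it is
// processed after all real events of the same block once the queue is re-sorted.
```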
@@ -287,7 +371,6 @@ let registerDynamicContracts = (
   // These are raw dynamic contracts received from contractRegister call.
   // Might contain duplicates which we should filter out
   dynamicContracts: array<indexingContract>,
-  ~currentBlockHeight,
 ) => {
   if fetchState.normalSelection.eventConfigs->Utils.Array.isEmpty {
     // Can the normalSelection be empty?
@@ -506,7 +589,6 @@ let registerDynamicContracts = (
 
   fetchState->updateInternal(
     ~partitions=fetchState.partitions->Js.Array2.concat(newPartitions),
-    ~currentBlockHeight,
     ~dcsToStore=switch fetchState.dcsToStore {
     | Some(existingDcs) => Some(Array.concat(existingDcs, dcsToStore))
     | None => Some(dcsToStore)
@@ -543,21 +625,6 @@ type query = {
 exception UnexpectedPartitionNotFound({partitionId: string})
 exception UnexpectedMergeQueryResponse({message: string})
 
-/*
-Comparitor for two events from the same chain. No need for chain id or timestamp
-*/
-let compareBufferItem = (a: Internal.item, b: Internal.item) => {
-  let blockDiff = b->Internal.getItemBlockNumber - a->Internal.getItemBlockNumber
-  if blockDiff === 0 {
-    b->Internal.getItemLogIndex - a->Internal.getItemLogIndex
-  } else {
-    blockDiff
-  }
-}
-
-// Some big number which should be bigger than any log index
-let blockItemLogIndex = 16777216
-
 /*
 Updates fetchState with a response for a given query.
 Returns Error if the partition with given query cannot be found (unexpected)
@@ -570,7 +637,6 @@ let handleQueryResult = (
   ~query: query,
   ~latestFetchedBlock: blockNumberAndTimestamp,
   ~newItems,
-  ~currentBlockHeight,
 ): result<t, exn> =>
 {
   let partitionId = query.partitionId
@@ -623,43 +689,14 @@ let handleQueryResult = (
     )
   }
 }->Result.map(partitions => {
-  let newQueue = fetchState.queue->Array.concat(newItems)
-
-  switch fetchState.onBlockConfigs {
-  | Some(onBlockConfigs) => {
-      let prevLatestFetchedBlockNumber = fetchState.latestFullyFetchedBlock.blockNumber
-      let nextLatestFullyFetchedBlockNumber = {
-        let nextLatestFullyFetchedBlockNumber = ref(latestFetchedBlock.blockNumber)
-        for idx in 0 to partitions->Array.length - 1 {
-          let p = partitions->Js.Array2.unsafe_get(idx)
-          if nextLatestFullyFetchedBlockNumber.contents > p.latestFetchedBlock.blockNumber {
-            nextLatestFullyFetchedBlockNumber := p.latestFetchedBlock.blockNumber
-          }
-        }
-        nextLatestFullyFetchedBlockNumber.contents
-      }
-
-      if nextLatestFullyFetchedBlockNumber > prevLatestFetchedBlockNumber {
-        for blockNumber in prevLatestFetchedBlockNumber + 1 to nextLatestFullyFetchedBlockNumber {
-          for configIdx in 0 to onBlockConfigs->Array.length - 1 {
-            let onBlockConfig = onBlockConfigs->Js.Array2.unsafe_get(configIdx)
-            newQueue->Array.push(Block({onBlockConfig, blockNumber, logIndex: blockItemLogIndex}))
-          }
-        }
-      }
-    }
-
-  | None => ()
-  }
-
   fetchState->updateInternal(
     ~partitions,
-    ~
-
-
-
-
-
+    ~mutItems=?{
+      switch newItems {
+      | [] => None
+      | _ => Some(fetchState.queue->Array.concat(newItems))
+      }
+    },
   )
 })
 
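handleQueryResult no longer schedules Block items itself; it only hands updateInternal an optional mutable item list, and only when the query actually returned new items, so an empty response leaves the existing, already sorted queue untouched. Roughly, in illustrative TypeScript:

```ts
type Item = { blockNumber: number; logIndex: number };

// Pass a mutable item array only when the response added something;
// `undefined` means the queue is unchanged, so the copy and re-sort are skipped.
function nextMutItems(queue: readonly Item[], newItems: readonly Item[]): Item[] | undefined {
  return newItems.length === 0 ? undefined : [...queue, ...newItems];
}
```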
@@ -738,9 +775,16 @@ let isFullPartition = (p: partition, ~maxAddrInPartition) => {
 }
 
 let getNextQuery = (
-  {
+  {
+    queue,
+    partitions,
+    targetBufferSize,
+    maxAddrInPartition,
+    endBlock,
+    indexingContracts,
+    blockLag,
+  }: t,
   ~concurrencyLimit,
-  ~targetBufferSize,
   ~currentBlockHeight,
   ~stateId,
 ) => {
@@ -932,19 +976,20 @@ Gets the earliest queueItem from thgetNodeEarliestEventWithUpdatedQueue.
 Finds the earliest queue item across all partitions and then returns that
 queue item with an update fetch state.
 */
-let getEarliestEvent = (
-
+let getEarliestEvent = (fetchState: t) => {
+  let {queue} = fetchState
+  switch fetchState.queue->Utils.Array.last {
   | Some(item) =>
-    if item->Internal.getItemBlockNumber <=
+    if item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber {
       Item({item, popItemOffQueue: () => queue->Js.Array2.pop->ignore})
     } else {
       NoItem({
-        latestFetchedBlock:
+        latestFetchedBlock: fetchState->bufferBlock,
       })
     }
   | None =>
     NoItem({
-      latestFetchedBlock:
+      latestFetchedBlock: fetchState->bufferBlock,
     })
   }
 }
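getEarliestEvent now gates the queue's last (earliest) item on bufferBlockNumber instead of the raw latestFullyFetchedBlock, so an item is only handed out once both event fetching and onBlock generation have reached its block. An illustrative TypeScript sketch; the types and names are not the package's:

```ts
type Item = { blockNumber: number; logIndex: number };

type EarliestEvent =
  | { kind: "item"; item: Item }
  | { kind: "noItem"; latestFetchedBlockNumber: number };

// The queue is ordered latest to earliest, so the earliest item is the last element.
function getEarliestEvent(queue: Item[], bufferBlockNumber: number): EarliestEvent {
  const earliest = queue[queue.length - 1];
  if (earliest !== undefined && earliest.blockNumber <= bufferBlockNumber) {
    return { kind: "item", item: earliest };
  }
  return { kind: "noItem", latestFetchedBlockNumber: bufferBlockNumber };
}
```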
@@ -959,12 +1004,14 @@ let make = (
   ~contracts: array<indexingContract>,
   ~maxAddrInPartition,
   ~chainId,
-  ~
+  ~targetBufferSize,
+  ~progressBlockNumber=startBlock - 1,
+  ~onBlockConfigs=[],
   ~blockLag=0,
 ): t => {
   let latestFetchedBlock = {
     blockTimestamp: 0,
-    blockNumber:
+    blockNumber: progressBlockNumber,
   }
 
   let notDependingOnAddresses = []
@@ -1075,27 +1122,24 @@ let make = (
     partitions,
     nextPartitionIndex: partitions->Array.length,
     contractConfigs,
-    isFetchingAtHead: false,
     maxAddrInPartition,
     chainId,
+    startBlock,
     endBlock,
     latestFullyFetchedBlock: latestFetchedBlock,
+    latestOnBlockBlockNumber: progressBlockNumber,
     normalSelection,
     indexingContracts,
     dcsToStore: None,
     blockLag,
     onBlockConfigs,
+    targetBufferSize,
     queue: [],
   }
 }
 
 let bufferSize = ({queue}: t) => queue->Array.length
 
-/**
- * Returns the latest block number fetched for the lowest fetcher queue (ie the earliest un-fetched dynamic contract)
- */
-let getLatestFullyFetchedBlock = ({latestFullyFetchedBlock}: t) => latestFullyFetchedBlock
-
 let pruneQueueFromFirstChangeEvent = (
   queue: array<Internal.item>,
   ~firstChangeEvent: blockNumberAndLogIndex,
@@ -1121,6 +1165,7 @@ let rollbackPartition = (
   | {selection: {dependsOnAddresses: false}} =>
     Some({
       ...p,
+      // FIXME: Should rollback latestFetchedBlock???
       status: {
         fetchingStateId: None,
       },
@@ -1188,10 +1233,13 @@ let rollback = (fetchState: t, ~firstChangeEvent) => {
     p->rollbackPartition(~firstChangeEvent, ~addressesToRemove)
   )
 
-
+  {
+    ...fetchState,
+    latestOnBlockBlockNumber: firstChangeEvent.blockNumber - 1, // TODO: This is not tested
+  }->updateInternal(
     ~partitions,
     ~indexingContracts,
-    ~
+    ~mutItems=fetchState.queue->pruneQueueFromFirstChangeEvent(~firstChangeEvent),
     ~dcsToStore=switch fetchState.dcsToStore {
     | Some(dcsToStore) =>
       let filtered =
@@ -1209,10 +1257,10 @@ let rollback = (fetchState: t, ~firstChangeEvent) => {
 * Returns a boolean indicating whether the fetch state is actively indexing
 * used for comparing event queues in the chain manager
 */
-let isActivelyIndexing = ({
+let isActivelyIndexing = ({endBlock} as fetchState: t) => {
   switch endBlock {
   | Some(endBlock) =>
-    let isPastEndblock =
+    let isPastEndblock = fetchState->bufferBlockNumber >= endBlock
     if isPastEndblock {
       fetchState->bufferSize > 0
     } else {
@@ -1223,26 +1271,27 @@ let isActivelyIndexing = ({latestFullyFetchedBlock, endBlock} as fetchState: t)
   }
 }
 let isReadyToEnterReorgThreshold = (
-  {
+  {endBlock, blockLag, queue} as fetchState: t,
   ~currentBlockHeight,
 ) => {
+  let bufferBlockNumber = fetchState->bufferBlockNumber
   currentBlockHeight !== 0 &&
   switch endBlock {
-  | Some(endBlock) if
-  | _ =>
+  | Some(endBlock) if bufferBlockNumber >= endBlock => true
+  | _ => bufferBlockNumber >= currentBlockHeight - blockLag
   } &&
   queue->Utils.Array.isEmpty
 }
 
 let filterAndSortForUnorderedBatch = {
-  let hasBatchItem = ({queue
+  let hasBatchItem = ({queue} as fetchState: t) => {
     switch queue->Utils.Array.last {
-    | Some(item) => item->Internal.getItemBlockNumber <=
+    | Some(item) => item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber
     | None => false
     }
  }
 
-  let hasFullBatch = ({queue
+  let hasFullBatch = ({queue} as fetchState: t, ~maxBatchSize) => {
    // Queue is ordered from latest to earliest, so the earliest eligible
    // item for a full batch of size B is at index (length - B).
    // Do NOT subtract an extra 1 here; when length === B we should still
@@ -1254,7 +1303,7 @@ let filterAndSortForUnorderedBatch = {
    // Unsafe can fail when maxBatchSize is 0,
    // but we ignore the case
    queue->Js.Array2.unsafe_get(targetBlockIdx)->Internal.getItemBlockNumber <=
-
+      fetchState->bufferBlockNumber
   }
  }
 
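hasFullBatch asks whether a full batch of maxBatchSize items is already consumable: with the queue ordered latest to earliest, the earliest item of such a batch sits at index length - maxBatchSize, and the batch is full when that item's block is within the buffer block. This also covers the boundary case called out in the comment, where length === maxBatchSize and the index is 0. A small TypeScript sketch (illustrative, with an explicit bounds guard standing in for checks the original handles elsewhere):

```ts
type Item = { blockNumber: number; logIndex: number };

// Queue ordered latest to earliest. A full batch of size B is consumable when
// the item at index (length - B) is already within the buffer block.
function hasFullBatch(queue: Item[], maxBatchSize: number, bufferBlockNumber: number): boolean {
  const targetIdx = queue.length - maxBatchSize;
  // Out of range covers both "fewer than B items" and the maxBatchSize === 0 edge case.
  if (targetIdx < 0 || targetIdx >= queue.length) return false;
  return queue[targetIdx].blockNumber <= bufferBlockNumber;
}

// length === maxBatchSize: targetIdx is 0, i.e. even the latest item must be consumable.
console.log(hasFullBatch([{ blockNumber: 7, logIndex: 0 }], 1, 7)); // true
```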
@@ -1282,18 +1331,19 @@ let filterAndSortForUnorderedBatch = {
   }
 }
 
-let getProgressBlockNumber = ({
+let getProgressBlockNumber = ({queue} as fetchState: t) => {
+  let bufferBlockNumber = fetchState->bufferBlockNumber
   switch queue->Utils.Array.last {
-  | Some(item) if
+  | Some(item) if bufferBlockNumber >= item->Internal.getItemBlockNumber =>
     item->Internal.getItemBlockNumber - 1
-  | _ =>
+  | _ => bufferBlockNumber
   }
 }
 
-let getProgressNextBlockLogIndex = ({queue
+let getProgressNextBlockLogIndex = ({queue} as fetchState: t) => {
   switch queue->Utils.Array.last {
   | Some(Event({logIndex, blockNumber}))
-    if
+    if fetchState->bufferBlockNumber >= blockNumber && logIndex > 0 =>
     Some(logIndex - 1)
   | _ => None
   }
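getProgressBlockNumber reports how far processing can be considered complete: if the earliest queued item is already within the buffer, progress stops just before that item's block; otherwise the whole buffer range counts as done. A minimal TypeScript sketch of the same rule, with illustrative names:

```ts
type Item = { blockNumber: number; logIndex: number };

// Queue ordered latest to earliest; the earliest pending item is the last element.
function getProgressBlockNumber(queue: Item[], bufferBlockNumber: number): number {
  const earliest = queue[queue.length - 1];
  return earliest !== undefined && bufferBlockNumber >= earliest.blockNumber
    ? earliest.blockNumber - 1 // a block only counts once all of its items are processed
    : bufferBlockNumber;
}
```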