envio 2.21.0 → 2.21.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/FetchState.res +1320 -0
- package/src/Prometheus.res +45 -1
- package/src/ReorgDetection.res +104 -56
- package/src/sources/Source.res +59 -0
- package/src/sources/SourceManager.res +493 -0
- package/src/sources/SourceManager.resi +32 -0
|
@@ -0,0 +1,1320 @@
|
|
|
1
|
+
open Belt
|
|
2
|
+
|
|
3
|
+
type dcData = {
|
|
4
|
+
registeringEventBlockTimestamp: int,
|
|
5
|
+
registeringEventLogIndex: int,
|
|
6
|
+
registeringEventContractName: string,
|
|
7
|
+
registeringEventName: string,
|
|
8
|
+
registeringEventSrcAddress: Address.t,
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
@unboxed
|
|
12
|
+
type contractRegister =
|
|
13
|
+
| Config
|
|
14
|
+
| DC(dcData)
|
|
15
|
+
type indexingContract = {
|
|
16
|
+
address: Address.t,
|
|
17
|
+
contractName: string,
|
|
18
|
+
startBlock: int,
|
|
19
|
+
register: contractRegister,
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
type contractConfig = {filterByAddresses: bool}
|
|
23
|
+
|
|
24
|
+
type blockNumberAndTimestamp = {
|
|
25
|
+
blockNumber: int,
|
|
26
|
+
blockTimestamp: int,
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
type blockNumberAndLogIndex = {blockNumber: int, logIndex: int}
|
|
30
|
+
|
|
31
|
+
type selection = {eventConfigs: array<Internal.eventConfig>, dependsOnAddresses: bool}
|
|
32
|
+
|
|
33
|
+
type status = {mutable fetchingStateId: option<int>}
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
A state that holds a queue of events and data regarding what to fetch next
|
|
37
|
+
for specific contract events with a given contract address.
|
|
38
|
+
When partitions for the same events are caught up to each other
|
|
39
|
+
they are merged until the maxAddrInPartition limit is reached.
|
|
40
|
+
*/
|
|
41
|
+
type partition = {
|
|
42
|
+
id: string,
|
|
43
|
+
status: status,
|
|
44
|
+
latestFetchedBlock: blockNumberAndTimestamp,
|
|
45
|
+
selection: selection,
|
|
46
|
+
addressesByContractName: dict<array<Address.t>>,
|
|
47
|
+
//Events ordered from latest to earliest
|
|
48
|
+
fetchedEventQueue: array<Internal.eventItem>,
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
type t = {
|
|
52
|
+
partitions: array<partition>,
|
|
53
|
+
// Used for the incremental partition id. Can't use the partitions length,
|
|
54
|
+
// since partitions might be deleted on merge or cleaned up
|
|
55
|
+
nextPartitionIndex: int,
|
|
56
|
+
isFetchingAtHead: bool,
|
|
57
|
+
endBlock: option<int>,
|
|
58
|
+
maxAddrInPartition: int,
|
|
59
|
+
firstEventBlockNumber: option<int>,
|
|
60
|
+
normalSelection: selection,
|
|
61
|
+
// By address
|
|
62
|
+
indexingContracts: dict<indexingContract>,
|
|
63
|
+
// By contract name
|
|
64
|
+
contractConfigs: dict<contractConfig>,
|
|
65
|
+
// Registered dynamic contracts that need to be stored in the db
|
|
66
|
+
// Should read them at the same time when getting items for the batch
|
|
67
|
+
dcsToStore: option<array<indexingContract>>,
|
|
68
|
+
// Not used for logic - only metadata
|
|
69
|
+
chainId: int,
|
|
70
|
+
// Fields computed by updateInternal
|
|
71
|
+
latestFullyFetchedBlock: blockNumberAndTimestamp,
|
|
72
|
+
queueSize: int,
|
|
73
|
+
// How many blocks behind the head we should query
|
|
74
|
+
// Added for the purpose of avoiding reorg handling
|
|
75
|
+
blockLag: option<int>,
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
let shallowCopyPartition = (p: partition) => {
|
|
79
|
+
...p,
|
|
80
|
+
fetchedEventQueue: p.fetchedEventQueue->Array.copy,
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
let copy = (fetchState: t) => {
|
|
84
|
+
let partitions = fetchState.partitions->Js.Array2.map(shallowCopyPartition)
|
|
85
|
+
{
|
|
86
|
+
maxAddrInPartition: fetchState.maxAddrInPartition,
|
|
87
|
+
partitions,
|
|
88
|
+
endBlock: fetchState.endBlock,
|
|
89
|
+
nextPartitionIndex: fetchState.nextPartitionIndex,
|
|
90
|
+
isFetchingAtHead: fetchState.isFetchingAtHead,
|
|
91
|
+
latestFullyFetchedBlock: fetchState.latestFullyFetchedBlock,
|
|
92
|
+
queueSize: fetchState.queueSize,
|
|
93
|
+
normalSelection: fetchState.normalSelection,
|
|
94
|
+
firstEventBlockNumber: fetchState.firstEventBlockNumber,
|
|
95
|
+
chainId: fetchState.chainId,
|
|
96
|
+
contractConfigs: fetchState.contractConfigs,
|
|
97
|
+
indexingContracts: fetchState.indexingContracts,
|
|
98
|
+
dcsToStore: fetchState.dcsToStore,
|
|
99
|
+
blockLag: fetchState.blockLag,
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/*
|
|
104
|
+
Comparator for two events from the same chain. No need for chain id or timestamp
|
|
105
|
+
*/
|
|
106
|
+
let eventItemGt = (a: Internal.eventItem, b: Internal.eventItem) =>
|
|
107
|
+
if a.blockNumber > b.blockNumber {
|
|
108
|
+
true
|
|
109
|
+
} else if a.blockNumber === b.blockNumber {
|
|
110
|
+
a.logIndex > b.logIndex
|
|
111
|
+
} else {
|
|
112
|
+
false
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
/*
|
|
116
|
+
Merges two event queues on a single event fetcher
|
|
117
|
+
|
|
118
|
+
Pass the shorter list into A for better performance
|
|
119
|
+
*/
|
|
120
|
+
let mergeSortedEventList = (a, b) => Utils.Array.mergeSorted(eventItemGt, a, b)
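// Illustrative note (not part of the released code): assuming Utils.Array.mergeSorted(gt, a, b)
// merges two arrays that are each already sorted by `gt`, two partition queues ordered
// latest-to-earliest by block, e.g. [#5, #3, #1] and [#4, #2], would merge into
// [#5, #4, #3, #2, #1], preserving the ordering that fetchedEventQueue relies on.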
|
|
121
|
+
|
|
122
|
+
let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
|
|
123
|
+
switch (p, target) {
|
|
124
|
+
| ({selection: {dependsOnAddresses: true}}, {selection: {dependsOnAddresses: true}}) => {
|
|
125
|
+
let latestFetchedBlock = target.latestFetchedBlock
|
|
126
|
+
|
|
127
|
+
let mergedAddresses = Js.Dict.empty()
|
|
128
|
+
|
|
129
|
+
let allowedAddressesNumber = ref(maxAddrInPartition)
|
|
130
|
+
|
|
131
|
+
target.addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => {
|
|
132
|
+
allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
|
|
133
|
+
mergedAddresses->Js.Dict.set(contractName, addresses)
|
|
134
|
+
})
|
|
135
|
+
|
|
136
|
+
// Start with putting all addresses to the merging dict
|
|
137
|
+
// And if they exceed the limit, start removing from the merging dict
|
|
138
|
+
// and putting into the rest dict
|
|
139
|
+
p.addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => {
|
|
140
|
+
allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
|
|
141
|
+
switch mergedAddresses->Utils.Dict.dangerouslyGetNonOption(contractName) {
|
|
142
|
+
| Some(targetAddresses) =>
|
|
143
|
+
mergedAddresses->Js.Dict.set(contractName, Array.concat(targetAddresses, addresses))
|
|
144
|
+
| None => mergedAddresses->Js.Dict.set(contractName, addresses)
|
|
145
|
+
}
|
|
146
|
+
})
|
|
147
|
+
|
|
148
|
+
let rest = if allowedAddressesNumber.contents < 0 {
|
|
149
|
+
let restAddresses = Js.Dict.empty()
|
|
150
|
+
|
|
151
|
+
mergedAddresses->Utils.Dict.forEachWithKey((contractName, addresses) => {
|
|
152
|
+
if allowedAddressesNumber.contents === 0 {
|
|
153
|
+
()
|
|
154
|
+
} else if addresses->Array.length <= -allowedAddressesNumber.contents {
|
|
155
|
+
allowedAddressesNumber := allowedAddressesNumber.contents + addresses->Array.length
|
|
156
|
+
mergedAddresses->Utils.Dict.deleteInPlace(contractName)
|
|
157
|
+
restAddresses->Js.Dict.set(contractName, addresses)
|
|
158
|
+
} else {
|
|
159
|
+
let restFrom = addresses->Array.length + allowedAddressesNumber.contents
|
|
160
|
+
mergedAddresses->Js.Dict.set(
|
|
161
|
+
contractName,
|
|
162
|
+
addresses->Js.Array2.slice(~start=0, ~end_=restFrom),
|
|
163
|
+
)
|
|
164
|
+
restAddresses->Js.Dict.set(contractName, addresses->Js.Array2.sliceFrom(restFrom))
|
|
165
|
+
allowedAddressesNumber := 0
|
|
166
|
+
}
|
|
167
|
+
})
|
|
168
|
+
|
|
169
|
+
Some({
|
|
170
|
+
id: p.id,
|
|
171
|
+
status: {
|
|
172
|
+
fetchingStateId: None,
|
|
173
|
+
},
|
|
174
|
+
fetchedEventQueue: [],
|
|
175
|
+
selection: target.selection,
|
|
176
|
+
addressesByContractName: restAddresses,
|
|
177
|
+
latestFetchedBlock,
|
|
178
|
+
})
|
|
179
|
+
} else {
|
|
180
|
+
None
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
(
|
|
184
|
+
{
|
|
185
|
+
id: target.id,
|
|
186
|
+
status: {
|
|
187
|
+
fetchingStateId: None,
|
|
188
|
+
},
|
|
189
|
+
selection: target.selection,
|
|
190
|
+
addressesByContractName: mergedAddresses,
|
|
191
|
+
fetchedEventQueue: mergeSortedEventList(p.fetchedEventQueue, target.fetchedEventQueue),
|
|
192
|
+
latestFetchedBlock,
|
|
193
|
+
},
|
|
194
|
+
rest,
|
|
195
|
+
)
|
|
196
|
+
}
|
|
197
|
+
| ({selection: {dependsOnAddresses: false}}, _)
|
|
198
|
+
| (_, {selection: {dependsOnAddresses: false}}) => (p, Some(target))
|
|
199
|
+
}
|
|
200
|
+
}
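// Illustrative example (not part of the released code): merging a partition with 30 addresses
// into a target that already holds 80, with maxAddrInPartition = 100, leaves 100 addresses on
// the merged target and returns the remaining 10 as the `rest` partition, which reuses the
// merged-in partition's id, the target's latestFetchedBlock, and an empty event queue.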
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
Updates a given partition with new latest block values and new fetched
|
|
204
|
+
events.
|
|
205
|
+
*/
|
|
206
|
+
let addItemsToPartition = (
|
|
207
|
+
p: partition,
|
|
208
|
+
~latestFetchedBlock,
|
|
209
|
+
//Events ordered latest to earliest
|
|
210
|
+
~reversedNewItems: array<Internal.eventItem>,
|
|
211
|
+
) => {
|
|
212
|
+
{
|
|
213
|
+
...p,
|
|
214
|
+
status: {
|
|
215
|
+
fetchingStateId: None,
|
|
216
|
+
},
|
|
217
|
+
latestFetchedBlock,
|
|
218
|
+
fetchedEventQueue: Array.concat(reversedNewItems, p.fetchedEventQueue),
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
/* strategy for TUI synced status:
|
|
223
|
+
* Firstly -> only update the synced status after a batch is processed (not on batch creation). But also set it when a batch creation is attempted and there is no batch
|
|
224
|
+
*
|
|
225
|
+
* Secondly -> reset timestampCaughtUpToHead and isFetchingAtHead when dynamic contracts get registered to a chain if they are not within 0.1% of the current block height
|
|
226
|
+
*
|
|
227
|
+
* New conditions for valid synced:
|
|
228
|
+
*
|
|
229
|
+
* CASE 1 (chains are being synchronised at the head)
|
|
230
|
+
*
|
|
231
|
+
* All chain fetchers are fetching at the head AND
|
|
232
|
+
* No events that can be processed on the queue (even if events still exist on the individual queues)
|
|
233
|
+
* CASE 2 (chain finishes earlier than any other chain)
|
|
234
|
+
*
|
|
235
|
+
* CASE 3 endblock has been reached and latest processed block is greater than or equal to endblock (both fields must be Some)
|
|
236
|
+
*
|
|
237
|
+
* The given chain fetcher is fetching at the head or latest processed block >= endblock
|
|
238
|
+
* The given chain has processed all events on the queue
|
|
239
|
+
* see https://github.com/Float-Capital/indexer/pull/1388 */
|
|
240
|
+
|
|
241
|
+
/* Dynamic contracts pose a unique case when calculating whether a chain is synced or not.
|
|
242
|
+
* Specifically, in the initial syncing state from SearchingForEvents -> Synced, where although a chain has technically processed up to all blocks
|
|
243
|
+
* for a contract that emits events with dynamic contracts, it is possible that those dynamic contracts will need to be indexed from blocks way before
|
|
244
|
+
* the current block height. This is a toleration check: if there are dynamic contracts within a batch, check how far they are from the current block height.
|
|
245
|
+
* If the gap is within one thousandth of the block height (0.1%), then we deem that contract to be within the synced range, and therefore do not reset the synced status of the chain */
|
|
246
|
+
let checkIsWithinSyncRange = (~latestFetchedBlock: blockNumberAndTimestamp, ~currentBlockHeight) =>
|
|
247
|
+
(currentBlockHeight->Int.toFloat -. latestFetchedBlock.blockNumber->Int.toFloat) /.
|
|
248
|
+
currentBlockHeight->Int.toFloat <= 0.001
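// Worked example (illustrative, not part of the released code):
// checkIsWithinSyncRange(~latestFetchedBlock={blockNumber: 999_500, blockTimestamp: 0}, ~currentBlockHeight=1_000_000)
//   evaluates (1_000_000. -. 999_500.) /. 1_000_000. = 0.0005 <= 0.001 and returns true,
//   while a latestFetchedBlock of 998_000 gives a ratio of 0.002 and returns false.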
|
|
249
|
+
|
|
250
|
+
/*
|
|
251
|
+
Update fetchState, merge registers and recompute derived values
|
|
252
|
+
*/
|
|
253
|
+
let updateInternal = (
|
|
254
|
+
fetchState: t,
|
|
255
|
+
~partitions=fetchState.partitions,
|
|
256
|
+
~nextPartitionIndex=fetchState.nextPartitionIndex,
|
|
257
|
+
~firstEventBlockNumber=fetchState.firstEventBlockNumber,
|
|
258
|
+
~indexingContracts=fetchState.indexingContracts,
|
|
259
|
+
~dcsToStore=fetchState.dcsToStore,
|
|
260
|
+
~currentBlockHeight=?,
|
|
261
|
+
): t => {
|
|
262
|
+
let firstPartition = partitions->Js.Array2.unsafe_get(0)
|
|
263
|
+
|
|
264
|
+
let queueSize = ref(0)
|
|
265
|
+
let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
|
|
266
|
+
|
|
267
|
+
for idx in 0 to partitions->Array.length - 1 {
|
|
268
|
+
let p = partitions->Js.Array2.unsafe_get(idx)
|
|
269
|
+
|
|
270
|
+
let partitionQueueSize = p.fetchedEventQueue->Array.length
|
|
271
|
+
|
|
272
|
+
queueSize := queueSize.contents + partitionQueueSize
|
|
273
|
+
|
|
274
|
+
if latestFullyFetchedBlock.contents.blockNumber > p.latestFetchedBlock.blockNumber {
|
|
275
|
+
latestFullyFetchedBlock := p.latestFetchedBlock
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
let latestFullyFetchedBlock = latestFullyFetchedBlock.contents
|
|
280
|
+
|
|
281
|
+
let isFetchingAtHead = switch currentBlockHeight {
|
|
282
|
+
| None => fetchState.isFetchingAtHead
|
|
283
|
+
| Some(currentBlockHeight) =>
|
|
284
|
+
// Sync isFetchingAtHead when currentBlockHeight is provided
|
|
285
|
+
if latestFullyFetchedBlock.blockNumber >= currentBlockHeight {
|
|
286
|
+
true
|
|
287
|
+
} else if (
|
|
288
|
+
// For dc registration reset the state only when dcs are not in the sync range
|
|
289
|
+
fetchState.isFetchingAtHead &&
|
|
290
|
+
checkIsWithinSyncRange(~latestFetchedBlock=latestFullyFetchedBlock, ~currentBlockHeight)
|
|
291
|
+
) {
|
|
292
|
+
true
|
|
293
|
+
} else {
|
|
294
|
+
false
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
Prometheus.IndexingPartitions.set(
|
|
299
|
+
~partitionsCount=partitions->Array.length,
|
|
300
|
+
~chainId=fetchState.chainId,
|
|
301
|
+
)
|
|
302
|
+
Prometheus.IndexingBufferSize.set(~bufferSize=queueSize.contents, ~chainId=fetchState.chainId)
|
|
303
|
+
Prometheus.IndexingBufferBlockNumber.set(
|
|
304
|
+
~blockNumber=latestFullyFetchedBlock.blockNumber,
|
|
305
|
+
~chainId=fetchState.chainId,
|
|
306
|
+
)
|
|
307
|
+
|
|
308
|
+
{
|
|
309
|
+
maxAddrInPartition: fetchState.maxAddrInPartition,
|
|
310
|
+
endBlock: fetchState.endBlock,
|
|
311
|
+
contractConfigs: fetchState.contractConfigs,
|
|
312
|
+
normalSelection: fetchState.normalSelection,
|
|
313
|
+
chainId: fetchState.chainId,
|
|
314
|
+
nextPartitionIndex,
|
|
315
|
+
firstEventBlockNumber,
|
|
316
|
+
partitions,
|
|
317
|
+
isFetchingAtHead,
|
|
318
|
+
latestFullyFetchedBlock,
|
|
319
|
+
queueSize: queueSize.contents,
|
|
320
|
+
indexingContracts,
|
|
321
|
+
dcsToStore,
|
|
322
|
+
blockLag: fetchState.blockLag,
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length
|
|
327
|
+
|
|
328
|
+
let warnDifferentContractType = (fetchState, ~existingContract, ~dc: indexingContract) => {
|
|
329
|
+
let logger = Logging.createChild(
|
|
330
|
+
~params={
|
|
331
|
+
"chainId": fetchState.chainId,
|
|
332
|
+
"contractAddress": dc.address->Address.toString,
|
|
333
|
+
"existingContractType": existingContract.contractName,
|
|
334
|
+
"newContractType": dc.contractName,
|
|
335
|
+
},
|
|
336
|
+
)
|
|
337
|
+
logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered for one contract and cannot be registered for another contract.`)
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
let registerDynamicContracts = (
|
|
341
|
+
fetchState: t,
|
|
342
|
+
// These are raw dynamic contracts received from contractRegister call.
|
|
343
|
+
// Might contain duplicates which we should filter out
|
|
344
|
+
dynamicContracts: array<indexingContract>,
|
|
345
|
+
~currentBlockHeight,
|
|
346
|
+
) => {
|
|
347
|
+
if fetchState.normalSelection.eventConfigs->Utils.Array.isEmpty {
|
|
348
|
+
// Can the normalSelection be empty?
|
|
349
|
+
// Probably only on pre-registration, but we don't
|
|
350
|
+
// register dynamic contracts during it
|
|
351
|
+
Js.Exn.raiseError(
|
|
352
|
+
"Invalid configuration. No events to fetch for the dynamic contract registration.",
|
|
353
|
+
)
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
let indexingContracts = fetchState.indexingContracts
|
|
357
|
+
let registeringContracts = Js.Dict.empty()
|
|
358
|
+
let addressesByContractName = Js.Dict.empty()
|
|
359
|
+
let earliestRegisteringEventBlockNumber = ref(%raw(`Infinity`))
|
|
360
|
+
let hasDCWithFilterByAddresses = ref(false)
|
|
361
|
+
|
|
362
|
+
for idx in 0 to dynamicContracts->Array.length - 1 {
|
|
363
|
+
let dc = dynamicContracts->Js.Array2.unsafe_get(idx)
|
|
364
|
+
switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) {
|
|
365
|
+
| Some({filterByAddresses}) =>
|
|
366
|
+
// Prevent registering already indexing contracts
|
|
367
|
+
switch indexingContracts->Utils.Dict.dangerouslyGetNonOption(dc.address->Address.toString) {
|
|
368
|
+
| Some(existingContract) =>
|
|
369
|
+
// FIXME: Instead of filtering out duplicates,
|
|
370
|
+
// we should check the block number first.
|
|
371
|
+
// If the new registration has an earlier block number,
|
|
372
|
+
// we should register it for the missing block range
|
|
373
|
+
if existingContract.contractName != dc.contractName {
|
|
374
|
+
fetchState->warnDifferentContractType(~existingContract, ~dc)
|
|
375
|
+
} else if existingContract.startBlock > dc.startBlock {
|
|
376
|
+
let logger = Logging.createChild(
|
|
377
|
+
~params={
|
|
378
|
+
"chainId": fetchState.chainId,
|
|
379
|
+
"contractAddress": dc.address->Address.toString,
|
|
380
|
+
"existingBlockNumber": existingContract.startBlock,
|
|
381
|
+
"newBlockNumber": dc.startBlock,
|
|
382
|
+
},
|
|
383
|
+
)
|
|
384
|
+
logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`)
|
|
385
|
+
}
|
|
386
|
+
()
|
|
387
|
+
| None =>
|
|
388
|
+
let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption(
|
|
389
|
+
dc.address->Address.toString,
|
|
390
|
+
) {
|
|
391
|
+
| Some(registeringContract) if registeringContract.contractName != dc.contractName =>
|
|
392
|
+
fetchState->warnDifferentContractType(~existingContract=registeringContract, ~dc)
|
|
393
|
+
false
|
|
394
|
+
| Some(registeringContract) =>
|
|
395
|
+
switch (registeringContract.register, dc.register) {
|
|
396
|
+
| (
|
|
397
|
+
DC({registeringEventLogIndex}),
|
|
398
|
+
DC({registeringEventLogIndex: newRegisteringEventLogIndex}),
|
|
399
|
+
) =>
|
|
400
|
+
// Update DC registration if the new one from the batch has an earlier registration log
|
|
401
|
+
registeringContract.startBlock > dc.startBlock ||
|
|
402
|
+
(registeringContract.startBlock === dc.startBlock &&
|
|
403
|
+
registeringEventLogIndex > newRegisteringEventLogIndex)
|
|
404
|
+
| (Config, _) | (_, Config) =>
|
|
405
|
+
Js.Exn.raiseError(
|
|
406
|
+
"Unexpected case: Config registration should be handled in a different function",
|
|
407
|
+
)
|
|
408
|
+
}
|
|
409
|
+
| None =>
|
|
410
|
+
hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses
|
|
411
|
+
addressesByContractName->Utils.Dict.push(dc.contractName, dc.address)
|
|
412
|
+
true
|
|
413
|
+
}
|
|
414
|
+
if shouldUpdate {
|
|
415
|
+
earliestRegisteringEventBlockNumber :=
|
|
416
|
+
Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock)
|
|
417
|
+
registeringContracts->Js.Dict.set(dc.address->Address.toString, dc)
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
| None => {
|
|
421
|
+
let logger = Logging.createChild(
|
|
422
|
+
~params={
|
|
423
|
+
"chainId": fetchState.chainId,
|
|
424
|
+
"contractAddress": dc.address->Address.toString,
|
|
425
|
+
"contractName": dc.contractName,
|
|
426
|
+
},
|
|
427
|
+
)
|
|
428
|
+
logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`)
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
let dcsToStore = registeringContracts->Js.Dict.values
|
|
434
|
+
switch dcsToStore {
|
|
435
|
+
// Don't update anything when everything was filtered out
|
|
436
|
+
| [] => fetchState
|
|
437
|
+
| _ => {
|
|
438
|
+
let newPartitions = if (
|
|
439
|
+
// This is the simple case where we only need to create a single partition.
|
|
440
|
+
// Theoretically, we could keep only the else branch, but we don't want to iterate over the addresses again.
|
|
441
|
+
|
|
442
|
+
dcsToStore->Array.length <= fetchState.maxAddrInPartition &&
|
|
443
|
+
!hasDCWithFilterByAddresses.contents
|
|
444
|
+
) {
|
|
445
|
+
[
|
|
446
|
+
{
|
|
447
|
+
id: fetchState.nextPartitionIndex->Int.toString,
|
|
448
|
+
status: {
|
|
449
|
+
fetchingStateId: None,
|
|
450
|
+
},
|
|
451
|
+
latestFetchedBlock: {
|
|
452
|
+
blockNumber: earliestRegisteringEventBlockNumber.contents - 1,
|
|
453
|
+
blockTimestamp: 0,
|
|
454
|
+
},
|
|
455
|
+
selection: fetchState.normalSelection,
|
|
456
|
+
addressesByContractName,
|
|
457
|
+
fetchedEventQueue: [],
|
|
458
|
+
},
|
|
459
|
+
]
|
|
460
|
+
} else {
|
|
461
|
+
let partitions = []
|
|
462
|
+
|
|
463
|
+
let earliestRegisteringEventBlockNumber = ref(%raw(`Infinity`))
|
|
464
|
+
let pendingAddressesByContractName = ref(Js.Dict.empty())
|
|
465
|
+
let pendingCount = ref(0)
|
|
466
|
+
|
|
467
|
+
let addPartition = () =>
|
|
468
|
+
partitions->Array.push({
|
|
469
|
+
id: (fetchState.nextPartitionIndex + partitions->Array.length)->Int.toString,
|
|
470
|
+
status: {
|
|
471
|
+
fetchingStateId: None,
|
|
472
|
+
},
|
|
473
|
+
latestFetchedBlock: {
|
|
474
|
+
blockNumber: earliestRegisteringEventBlockNumber.contents - 1,
|
|
475
|
+
blockTimestamp: 0,
|
|
476
|
+
},
|
|
477
|
+
selection: fetchState.normalSelection,
|
|
478
|
+
addressesByContractName: pendingAddressesByContractName.contents,
|
|
479
|
+
fetchedEventQueue: [],
|
|
480
|
+
})
|
|
481
|
+
|
|
482
|
+
// Use for loops instead of forEach, so ReScript inlines the ref access better
|
|
483
|
+
for idx in 0 to addressesByContractName->Js.Dict.keys->Array.length - 1 {
|
|
484
|
+
let contractName = addressesByContractName->Js.Dict.keys->Js.Array2.unsafe_get(idx)
|
|
485
|
+
let addresses = addressesByContractName->Js.Dict.unsafeGet(contractName)
|
|
486
|
+
|
|
487
|
+
// Can unsafely get it, because we already filtered out the contracts
|
|
488
|
+
// that don't have any events to fetch
|
|
489
|
+
let contractConfig = fetchState.contractConfigs->Js.Dict.unsafeGet(contractName)
|
|
490
|
+
|
|
491
|
+
// For this case we can't filter out events earlier than contract registration
|
|
492
|
+
// on the client side, so we need to keep the old logic of creating
|
|
493
|
+
// a partition for every block range, so there are no irrelevant events
|
|
494
|
+
if contractConfig.filterByAddresses {
|
|
495
|
+
let byStartBlock = Js.Dict.empty()
|
|
496
|
+
|
|
497
|
+
for jdx in 0 to addresses->Array.length - 1 {
|
|
498
|
+
let address = addresses->Js.Array2.unsafe_get(jdx)
|
|
499
|
+
let indexingContract =
|
|
500
|
+
registeringContracts->Js.Dict.unsafeGet(address->Address.toString)
|
|
501
|
+
|
|
502
|
+
byStartBlock->Utils.Dict.push(indexingContract.startBlock->Int.toString, address)
|
|
503
|
+
}
|
|
504
|
+
|
|
505
|
+
// Integer-like keys are iterated in ascending order per the JS spec
|
|
506
|
+
byStartBlock
|
|
507
|
+
->Js.Dict.keys
|
|
508
|
+
->Js.Array2.forEach(startBlockKey => {
|
|
509
|
+
let addresses = byStartBlock->Js.Dict.unsafeGet(startBlockKey)
|
|
510
|
+
let addressesByContractName = Js.Dict.empty()
|
|
511
|
+
addressesByContractName->Js.Dict.set(contractName, addresses)
|
|
512
|
+
partitions->Array.push({
|
|
513
|
+
id: (fetchState.nextPartitionIndex + partitions->Array.length)->Int.toString,
|
|
514
|
+
status: {
|
|
515
|
+
fetchingStateId: None,
|
|
516
|
+
},
|
|
517
|
+
latestFetchedBlock: {
|
|
518
|
+
blockNumber: Pervasives.max(startBlockKey->Int.fromString->Option.getExn - 1, 0),
|
|
519
|
+
blockTimestamp: 0,
|
|
520
|
+
},
|
|
521
|
+
selection: fetchState.normalSelection,
|
|
522
|
+
addressesByContractName,
|
|
523
|
+
fetchedEventQueue: [],
|
|
524
|
+
})
|
|
525
|
+
})
|
|
526
|
+
} else {
|
|
527
|
+
// The goal is to split partitions so that
|
|
528
|
+
// each partition mostly contains addresses of the same contract
|
|
529
|
+
// TODO: Should do the same for the initial FetchState creation
|
|
530
|
+
for jdx in 0 to addresses->Array.length - 1 {
|
|
531
|
+
let address = addresses->Js.Array2.unsafe_get(jdx)
|
|
532
|
+
if pendingCount.contents === fetchState.maxAddrInPartition {
|
|
533
|
+
addPartition()
|
|
534
|
+
pendingAddressesByContractName := Js.Dict.empty()
|
|
535
|
+
pendingCount := 0
|
|
536
|
+
earliestRegisteringEventBlockNumber := %raw(`Infinity`)
|
|
537
|
+
}
|
|
538
|
+
|
|
539
|
+
let indexingContract =
|
|
540
|
+
registeringContracts->Js.Dict.unsafeGet(address->Address.toString)
|
|
541
|
+
|
|
542
|
+
pendingCount := pendingCount.contents + 1
|
|
543
|
+
pendingAddressesByContractName.contents->Utils.Dict.push(contractName, address)
|
|
544
|
+
earliestRegisteringEventBlockNumber :=
|
|
545
|
+
Pervasives.min(
|
|
546
|
+
earliestRegisteringEventBlockNumber.contents,
|
|
547
|
+
indexingContract.startBlock,
|
|
548
|
+
)
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
}
|
|
552
|
+
|
|
553
|
+
if pendingCount.contents > 0 {
|
|
554
|
+
addPartition()
|
|
555
|
+
}
|
|
556
|
+
|
|
557
|
+
partitions
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
Prometheus.IndexingAddresses.set(
|
|
561
|
+
~addressesCount=fetchState->numAddresses + dcsToStore->Array.length,
|
|
562
|
+
~chainId=fetchState.chainId,
|
|
563
|
+
)
|
|
564
|
+
|
|
565
|
+
fetchState->updateInternal(
|
|
566
|
+
~partitions=fetchState.partitions->Js.Array2.concat(newPartitions),
|
|
567
|
+
~currentBlockHeight,
|
|
568
|
+
~dcsToStore=switch fetchState.dcsToStore {
|
|
569
|
+
| Some(existingDcs) => Some(Array.concat(existingDcs, dcsToStore))
|
|
570
|
+
| None => Some(dcsToStore)
|
|
571
|
+
},
|
|
572
|
+
~indexingContracts=// We don't need registeringContracts anymore,
|
|
573
|
+
// so we can safely mix indexingContracts into it.
|
|
574
|
+
// The original indexingContracts won't be mutated
|
|
575
|
+
Utils.Dict.mergeInPlace(registeringContracts, indexingContracts),
|
|
576
|
+
~nextPartitionIndex=fetchState.nextPartitionIndex + newPartitions->Array.length,
|
|
577
|
+
)
|
|
578
|
+
}
|
|
579
|
+
}
|
|
580
|
+
}
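// Illustrative behaviour (not part of the released code): if the same address is registered twice
// within one batch, e.g. by events at block 50 / logIndex 3 and block 50 / logIndex 1, only the
// earlier registration (logIndex 1) is kept; if the address already exists in indexingContracts,
// the new registration is skipped, with a warning when the contract type differs or when the
// existing registration starts at a later block.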
|
|
581
|
+
|
|
582
|
+
type queryTarget =
|
|
583
|
+
| Head
|
|
584
|
+
| EndBlock({toBlock: int})
|
|
585
|
+
| Merge({
|
|
586
|
+
// The partition we are going to merge into
|
|
587
|
+
// It shouldn't be fetching during the query
|
|
588
|
+
intoPartitionId: string,
|
|
589
|
+
toBlock: int,
|
|
590
|
+
})
|
|
591
|
+
|
|
592
|
+
type query = {
|
|
593
|
+
partitionId: string,
|
|
594
|
+
fromBlock: int,
|
|
595
|
+
selection: selection,
|
|
596
|
+
addressesByContractName: dict<array<Address.t>>,
|
|
597
|
+
target: queryTarget,
|
|
598
|
+
indexingContracts: dict<indexingContract>,
|
|
599
|
+
}
|
|
600
|
+
|
|
601
|
+
exception UnexpectedPartitionNotFound({partitionId: string})
|
|
602
|
+
exception UnexpectedMergeQueryResponse({message: string})
|
|
603
|
+
|
|
604
|
+
/*
|
|
605
|
+
Updates fetchState with a response for a given query.
|
|
606
|
+
Returns Error if the partition with given query cannot be found (unexpected)
|
|
607
|
+
If MergeQuery caught up to the target partition, it triggers the merge of the partitions.
|
|
608
|
+
|
|
609
|
+
newItems are ordered earliest to latest (as they are returned from the worker)
|
|
610
|
+
*/
|
|
611
|
+
let handleQueryResult = (
|
|
612
|
+
{partitions} as fetchState: t,
|
|
613
|
+
~query: query,
|
|
614
|
+
~latestFetchedBlock: blockNumberAndTimestamp,
|
|
615
|
+
~reversedNewItems,
|
|
616
|
+
~currentBlockHeight,
|
|
617
|
+
): result<t, exn> =>
|
|
618
|
+
{
|
|
619
|
+
let partitionId = query.partitionId
|
|
620
|
+
|
|
621
|
+
switch partitions->Array.getIndexBy(p => p.id === partitionId) {
|
|
622
|
+
| Some(pIndex) =>
|
|
623
|
+
let p = partitions->Js.Array2.unsafe_get(pIndex)
|
|
624
|
+
let updatedPartition = p->addItemsToPartition(~latestFetchedBlock, ~reversedNewItems)
|
|
625
|
+
|
|
626
|
+
switch query.target {
|
|
627
|
+
| Head
|
|
628
|
+
| EndBlock(_) =>
|
|
629
|
+
Ok(partitions->Utils.Array.setIndexImmutable(pIndex, updatedPartition))
|
|
630
|
+
| Merge({intoPartitionId}) =>
|
|
631
|
+
switch partitions->Array.getIndexBy(p => p.id === intoPartitionId) {
|
|
632
|
+
| Some(targetIndex)
|
|
633
|
+
if (partitions->Js.Array2.unsafe_get(targetIndex)).latestFetchedBlock.blockNumber ===
|
|
634
|
+
latestFetchedBlock.blockNumber => {
|
|
635
|
+
let target = partitions->Js.Array2.unsafe_get(targetIndex)
|
|
636
|
+
let (merged, rest) =
|
|
637
|
+
updatedPartition->mergeIntoPartition(
|
|
638
|
+
~target,
|
|
639
|
+
~maxAddrInPartition=fetchState.maxAddrInPartition,
|
|
640
|
+
)
|
|
641
|
+
|
|
642
|
+
let updatedPartitions = partitions->Utils.Array.setIndexImmutable(targetIndex, merged)
|
|
643
|
+
let updatedPartitions = switch rest {
|
|
644
|
+
| Some(rest) => {
|
|
645
|
+
updatedPartitions->Js.Array2.unsafe_set(pIndex, rest)
|
|
646
|
+
updatedPartitions
|
|
647
|
+
}
|
|
648
|
+
| None => updatedPartitions->Utils.Array.removeAtIndex(pIndex)
|
|
649
|
+
}
|
|
650
|
+
Ok(updatedPartitions)
|
|
651
|
+
}
|
|
652
|
+
| _ => Ok(partitions->Utils.Array.setIndexImmutable(pIndex, updatedPartition))
|
|
653
|
+
}
|
|
654
|
+
}
|
|
655
|
+
| None =>
|
|
656
|
+
Error(
|
|
657
|
+
UnexpectedPartitionNotFound({
|
|
658
|
+
partitionId: partitionId,
|
|
659
|
+
}),
|
|
660
|
+
)
|
|
661
|
+
}
|
|
662
|
+
}->Result.map(partitions => {
|
|
663
|
+
fetchState->updateInternal(
|
|
664
|
+
~partitions,
|
|
665
|
+
~currentBlockHeight,
|
|
666
|
+
~firstEventBlockNumber=switch reversedNewItems->Utils.Array.last {
|
|
667
|
+
| Some(newFirstItem) =>
|
|
668
|
+
Utils.Math.minOptInt(fetchState.firstEventBlockNumber, Some(newFirstItem.blockNumber))
|
|
669
|
+
| None => fetchState.firstEventBlockNumber
|
|
670
|
+
},
|
|
671
|
+
)
|
|
672
|
+
})
|
|
673
|
+
|
|
674
|
+
let makePartitionQuery = (p: partition, ~indexingContracts, ~endBlock, ~mergeTarget) => {
|
|
675
|
+
let fromBlock = switch p.latestFetchedBlock.blockNumber {
|
|
676
|
+
| 0 => 0
|
|
677
|
+
| latestFetchedBlockNumber => latestFetchedBlockNumber + 1
|
|
678
|
+
}
|
|
679
|
+
switch (endBlock, mergeTarget) {
|
|
680
|
+
| (Some(endBlock), _) if fromBlock > endBlock => None
|
|
681
|
+
| (_, Some(mergeTarget)) =>
|
|
682
|
+
Some(
|
|
683
|
+
Merge({
|
|
684
|
+
toBlock: mergeTarget.latestFetchedBlock.blockNumber,
|
|
685
|
+
intoPartitionId: mergeTarget.id,
|
|
686
|
+
}),
|
|
687
|
+
)
|
|
688
|
+
| (Some(endBlock), None) => Some(EndBlock({toBlock: endBlock}))
|
|
689
|
+
| (None, None) => Some(Head)
|
|
690
|
+
}->Option.map(target => {
|
|
691
|
+
{
|
|
692
|
+
partitionId: p.id,
|
|
693
|
+
fromBlock,
|
|
694
|
+
target,
|
|
695
|
+
selection: p.selection,
|
|
696
|
+
addressesByContractName: p.addressesByContractName,
|
|
697
|
+
indexingContracts,
|
|
698
|
+
}
|
|
699
|
+
})
|
|
700
|
+
}
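// Illustrative example (not part of the released code): for a partition whose
// latestFetchedBlock.blockNumber is 4999, fromBlock becomes 5000; with endBlock = Some(4500)
// the function returns None (nothing left to query), with a mergeTarget fetched up to block 8000
// it returns a Merge target with toBlock = 8000, and with neither it falls back to
// EndBlock({toBlock: endBlock}) or Head.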
|
|
701
|
+
|
|
702
|
+
type nextQuery =
|
|
703
|
+
| ReachedMaxConcurrency
|
|
704
|
+
| WaitingForNewBlock
|
|
705
|
+
| NothingToQuery
|
|
706
|
+
| Ready(array<query>)
|
|
707
|
+
|
|
708
|
+
let startFetchingQueries = ({partitions}: t, ~queries: array<query>, ~stateId) => {
|
|
709
|
+
queries->Array.forEach(q => {
|
|
710
|
+
switch partitions->Js.Array2.find(p => p.id === q.partitionId) {
|
|
711
|
+
// Shouldn't be mutated to None anymore
|
|
712
|
+
// The status will be immutably set to the initial one when we handle the response
|
|
713
|
+
| Some(p) => p.status.fetchingStateId = Some(stateId)
|
|
714
|
+
| None => Js.Exn.raiseError("Unexpected case: Couldn't find partition for the fetching query")
|
|
715
|
+
}
|
|
716
|
+
})
|
|
717
|
+
}
|
|
718
|
+
|
|
719
|
+
let addressesByContractNameCount = (addressesByContractName: dict<array<Address.t>>) => {
|
|
720
|
+
let numAddresses = ref(0)
|
|
721
|
+
let contractNames = addressesByContractName->Js.Dict.keys
|
|
722
|
+
for idx in 0 to contractNames->Array.length - 1 {
|
|
723
|
+
let contractName = contractNames->Js.Array2.unsafe_get(idx)
|
|
724
|
+
numAddresses :=
|
|
725
|
+
numAddresses.contents + addressesByContractName->Js.Dict.unsafeGet(contractName)->Array.length
|
|
726
|
+
}
|
|
727
|
+
numAddresses.contents
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
let addressesByContractNameGetAll = (addressesByContractName: dict<array<Address.t>>) => {
|
|
731
|
+
let all = ref([])
|
|
732
|
+
let contractNames = addressesByContractName->Js.Dict.keys
|
|
733
|
+
for idx in 0 to contractNames->Array.length - 1 {
|
|
734
|
+
let contractName = contractNames->Js.Array2.unsafe_get(idx)
|
|
735
|
+
all := all.contents->Array.concat(addressesByContractName->Js.Dict.unsafeGet(contractName))
|
|
736
|
+
}
|
|
737
|
+
all.contents
|
|
738
|
+
}
|
|
739
|
+
|
|
740
|
+
@inline
|
|
741
|
+
let isFullPartition = (p: partition, ~maxAddrInPartition) => {
|
|
742
|
+
switch p {
|
|
743
|
+
| {selection: {dependsOnAddresses: false}} => true
|
|
744
|
+
| _ => p.addressesByContractName->addressesByContractNameCount >= maxAddrInPartition
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
|
|
748
|
+
let getNextQuery = (
|
|
749
|
+
{
|
|
750
|
+
partitions,
|
|
751
|
+
maxAddrInPartition,
|
|
752
|
+
endBlock,
|
|
753
|
+
latestFullyFetchedBlock,
|
|
754
|
+
indexingContracts,
|
|
755
|
+
blockLag,
|
|
756
|
+
}: t,
|
|
757
|
+
~concurrencyLimit,
|
|
758
|
+
~maxQueueSize,
|
|
759
|
+
~currentBlockHeight,
|
|
760
|
+
~stateId,
|
|
761
|
+
) => {
|
|
762
|
+
if currentBlockHeight === 0 {
|
|
763
|
+
WaitingForNewBlock
|
|
764
|
+
} else if concurrencyLimit === 0 {
|
|
765
|
+
ReachedMaxConcurrency
|
|
766
|
+
} else {
|
|
767
|
+
let headBlock = currentBlockHeight - blockLag->Option.getWithDefault(0)
|
|
768
|
+
|
|
769
|
+
let fullPartitions = []
|
|
770
|
+
let mergingPartitions = []
|
|
771
|
+
let areMergingPartitionsFetching = ref(false)
|
|
772
|
+
let mostBehindMergingPartition = ref(None)
|
|
773
|
+
let mergingPartitionTarget = ref(None)
|
|
774
|
+
let shouldWaitForNewBlock = ref(
|
|
775
|
+
switch endBlock {
|
|
776
|
+
| Some(endBlock) => headBlock < endBlock
|
|
777
|
+
| None => true
|
|
778
|
+
},
|
|
779
|
+
)
|
|
780
|
+
|
|
781
|
+
let checkIsFetchingPartition = p => {
|
|
782
|
+
switch p.status.fetchingStateId {
|
|
783
|
+
| Some(fetchingStateId) => stateId <= fetchingStateId
|
|
784
|
+
| None => false
|
|
785
|
+
}
|
|
786
|
+
}
|
|
787
|
+
|
|
788
|
+
for idx in 0 to partitions->Js.Array2.length - 1 {
|
|
789
|
+
let p = partitions->Js.Array2.unsafe_get(idx)
|
|
790
|
+
|
|
791
|
+
let isFetching = checkIsFetchingPartition(p)
|
|
792
|
+
let hasReachedTheHead = p.latestFetchedBlock.blockNumber >= headBlock
|
|
793
|
+
|
|
794
|
+
if isFetching || !hasReachedTheHead {
|
|
795
|
+
// Even if there are some partitions waiting for the new block
|
|
796
|
+
// We still want to wait for all partitions to reach the head
|
|
797
|
+
// because they might update currentBlockHeight in their response
|
|
798
|
+
// Also, there are cases when some partitions are fetching at 50% of the chain
|
|
799
|
+
// and we don't want to poll the head for a few small partitions
|
|
800
|
+
shouldWaitForNewBlock := false
|
|
801
|
+
}
|
|
802
|
+
|
|
803
|
+
if p->isFullPartition(~maxAddrInPartition) {
|
|
804
|
+
fullPartitions->Array.push(p)
|
|
805
|
+
} else {
|
|
806
|
+
mergingPartitions->Array.push(p)
|
|
807
|
+
|
|
808
|
+
mostBehindMergingPartition :=
|
|
809
|
+
switch mostBehindMergingPartition.contents {
|
|
810
|
+
| Some(mostBehindMergingPartition) =>
|
|
811
|
+
if (
|
|
812
|
+
// The = check is important here. We don't want to have a target
|
|
813
|
+
// with the same latestFetchedBlock. They should be merged in separate queries
|
|
814
|
+
mostBehindMergingPartition.latestFetchedBlock.blockNumber ===
|
|
815
|
+
p.latestFetchedBlock.blockNumber
|
|
816
|
+
) {
|
|
817
|
+
mostBehindMergingPartition
|
|
818
|
+
} else if (
|
|
819
|
+
mostBehindMergingPartition.latestFetchedBlock.blockNumber <
|
|
820
|
+
p.latestFetchedBlock.blockNumber
|
|
821
|
+
) {
|
|
822
|
+
mergingPartitionTarget :=
|
|
823
|
+
switch mergingPartitionTarget.contents {
|
|
824
|
+
| Some(mergingPartitionTarget)
|
|
825
|
+
if mergingPartitionTarget.latestFetchedBlock.blockNumber <
|
|
826
|
+
p.latestFetchedBlock.blockNumber => mergingPartitionTarget
|
|
827
|
+
| _ => p
|
|
828
|
+
}->Some
|
|
829
|
+
mostBehindMergingPartition
|
|
830
|
+
} else {
|
|
831
|
+
mergingPartitionTarget := Some(mostBehindMergingPartition)
|
|
832
|
+
p
|
|
833
|
+
}
|
|
834
|
+
| None => p
|
|
835
|
+
}->Some
|
|
836
|
+
|
|
837
|
+
if isFetching {
|
|
838
|
+
areMergingPartitionsFetching := true
|
|
839
|
+
}
|
|
840
|
+
}
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
let maxPartitionQueueSize = maxQueueSize / (fullPartitions->Array.length + 1)
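// e.g. (illustrative) with maxQueueSize = 10_000 and 3 full partitions, each partition may
// buffer up to 10_000 / 4 = 2500 items before the checkQueueSize condition below stops
// scheduling new queries for it.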
|
|
844
|
+
let isWithinSyncRange = checkIsWithinSyncRange(
|
|
845
|
+
~latestFetchedBlock=latestFullyFetchedBlock,
|
|
846
|
+
~currentBlockHeight,
|
|
847
|
+
)
|
|
848
|
+
let queries = []
|
|
849
|
+
|
|
850
|
+
let registerPartitionQuery = (p, ~checkQueueSize, ~mergeTarget=?) => {
|
|
851
|
+
if (
|
|
852
|
+
p->checkIsFetchingPartition->not &&
|
|
853
|
+
p.latestFetchedBlock.blockNumber < currentBlockHeight &&
|
|
854
|
+
(checkQueueSize ? p.fetchedEventQueue->Array.length < maxPartitionQueueSize : true) && (
|
|
855
|
+
isWithinSyncRange
|
|
856
|
+
? true
|
|
857
|
+
: !checkIsWithinSyncRange(~latestFetchedBlock=p.latestFetchedBlock, ~currentBlockHeight)
|
|
858
|
+
)
|
|
859
|
+
) {
|
|
860
|
+
switch p->makePartitionQuery(
|
|
861
|
+
~indexingContracts,
|
|
862
|
+
~endBlock=switch blockLag {
|
|
863
|
+
| Some(_) =>
|
|
864
|
+
switch endBlock {
|
|
865
|
+
| Some(endBlock) => Some(Pervasives.min(headBlock, endBlock))
|
|
866
|
+
// Force head block as an endBlock when blockLag is set
|
|
867
|
+
// because otherwise HyperSync might return a bigger range
|
|
868
|
+
| None => Some(headBlock)
|
|
869
|
+
}
|
|
870
|
+
| None => endBlock
|
|
871
|
+
},
|
|
872
|
+
~mergeTarget,
|
|
873
|
+
) {
|
|
874
|
+
| Some(q) => queries->Array.push(q)
|
|
875
|
+
| None => ()
|
|
876
|
+
}
|
|
877
|
+
}
|
|
878
|
+
}
|
|
879
|
+
|
|
880
|
+
fullPartitions->Array.forEach(p => p->registerPartitionQuery(~checkQueueSize=true))
|
|
881
|
+
|
|
882
|
+
if areMergingPartitionsFetching.contents->not {
|
|
883
|
+
switch mergingPartitions {
|
|
884
|
+
| [] => ()
|
|
885
|
+
| [p] =>
|
|
886
|
+
// If there's only one non-full partition without a merge target,
|
|
887
|
+
// check that it didn't exceed queue size
|
|
888
|
+
p->registerPartitionQuery(~checkQueueSize=true)
|
|
889
|
+
| _ =>
|
|
890
|
+
switch (mostBehindMergingPartition.contents, mergingPartitionTarget.contents) {
|
|
891
|
+
| (Some(p), None) =>
|
|
892
|
+
// Even though there's no merge target for the query,
|
|
893
|
+
// we still have partitions to merge, so don't check for the queue size here
|
|
894
|
+
p->registerPartitionQuery(~checkQueueSize=false)
|
|
895
|
+
| (Some(p), Some(mergeTarget)) =>
|
|
896
|
+
p->registerPartitionQuery(~checkQueueSize=false, ~mergeTarget)
|
|
897
|
+
| (None, _) =>
|
|
898
|
+
Js.Exn.raiseError("Unexpected case, should always have a most behind partition.")
|
|
899
|
+
}
|
|
900
|
+
}
|
|
901
|
+
}
|
|
902
|
+
|
|
903
|
+
if queries->Utils.Array.isEmpty {
|
|
904
|
+
if shouldWaitForNewBlock.contents {
|
|
905
|
+
WaitingForNewBlock
|
|
906
|
+
} else {
|
|
907
|
+
NothingToQuery
|
|
908
|
+
}
|
|
909
|
+
} else {
|
|
910
|
+
Ready(
|
|
911
|
+
if queries->Array.length > concurrencyLimit {
|
|
912
|
+
queries
|
|
913
|
+
->Js.Array2.sortInPlaceWith((a, b) => a.fromBlock - b.fromBlock)
|
|
914
|
+
->Js.Array2.slice(~start=0, ~end_=concurrencyLimit)
|
|
915
|
+
} else {
|
|
916
|
+
queries
|
|
917
|
+
},
|
|
918
|
+
)
|
|
919
|
+
}
|
|
920
|
+
}
|
|
921
|
+
}
|
|
922
|
+
|
|
923
|
+
type itemWithPopFn = {item: Internal.eventItem, popItemOffQueue: unit => unit}
|
|
924
|
+
|
|
925
|
+
/**
|
|
926
|
+
Represents the head of a fetchState partition's fetchedEventQueue as either
|
|
927
|
+
an existing item, or no item with the latest fetched block data
|
|
928
|
+
*/
|
|
929
|
+
type queueItem =
|
|
930
|
+
| Item(itemWithPopFn)
|
|
931
|
+
| NoItem({latestFetchedBlock: blockNumberAndTimestamp})
|
|
932
|
+
|
|
933
|
+
let queueItemBlockNumber = (queueItem: queueItem) => {
|
|
934
|
+
switch queueItem {
|
|
935
|
+
| Item({item}) => item.blockNumber
|
|
936
|
+
| NoItem({latestFetchedBlock: {blockNumber}}) => blockNumber === 0 ? 0 : blockNumber + 1
|
|
937
|
+
}
|
|
938
|
+
}
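// Illustrative: an Item at block 120 compares as 120, while a NoItem whose latestFetchedBlock
// is 120 compares as 121, the earliest block that partition could still produce an item for
// (block 0 stays 0 for the initial state).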
|
|
939
|
+
|
|
940
|
+
let queueItemIsInReorgThreshold = (
|
|
941
|
+
queueItem: queueItem,
|
|
942
|
+
~currentBlockHeight,
|
|
943
|
+
~highestBlockBelowThreshold,
|
|
944
|
+
) => {
|
|
945
|
+
if currentBlockHeight === 0 {
|
|
946
|
+
false
|
|
947
|
+
} else {
|
|
948
|
+
switch queueItem {
|
|
949
|
+
| Item(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
|
|
950
|
+
| NoItem(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
|
|
951
|
+
}
|
|
952
|
+
}
|
|
953
|
+
}
|
|
954
|
+
|
|
955
|
+
/**
|
|
956
|
+
Simple constructor for no item from partition
|
|
957
|
+
*/
|
|
958
|
+
let makeNoItem = ({latestFetchedBlock}: partition) => NoItem({
|
|
959
|
+
latestFetchedBlock: latestFetchedBlock,
|
|
960
|
+
})
|
|
961
|
+
|
|
962
|
+
/**
|
|
963
|
+
Creates a comparable value for items and no-items on partition queues.
|
|
964
|
+
Block number takes priority here, since the latest fetched timestamp could
|
|
965
|
+
be zero from partition initialization while a higher latest fetched block number exists
|
|
966
|
+
|
|
967
|
+
Note: on the chain manager, when comparing multi chain, the timestamp is the highest priority compare value
|
|
968
|
+
*/
|
|
969
|
+
let qItemLt = (a, b) => {
|
|
970
|
+
let aBlockNumber = a->queueItemBlockNumber
|
|
971
|
+
let bBlockNumber = b->queueItemBlockNumber
|
|
972
|
+
if aBlockNumber < bBlockNumber {
|
|
973
|
+
true
|
|
974
|
+
} else if aBlockNumber === bBlockNumber {
|
|
975
|
+
switch (a, b) {
|
|
976
|
+
| (Item(a), Item(b)) => a.item.logIndex < b.item.logIndex
|
|
977
|
+
| (NoItem(_), Item(_)) => true
|
|
978
|
+
| (Item(_), NoItem(_))
|
|
979
|
+
| (NoItem(_), NoItem(_)) => false
|
|
980
|
+
}
|
|
981
|
+
} else {
|
|
982
|
+
false
|
|
983
|
+
}
|
|
984
|
+
}
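// Illustrative ordering (not part of the released code): NoItem(latestFetchedBlock = 9) compares
// at block 10 and sorts before Item(block 10, logIndex 0), which in turn sorts before
// Item(block 10, logIndex 3), so getEarliestEvent favours the partition that still has to
// fetch block 10 over already-fetched items at that block.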
|
|
985
|
+
|
|
986
|
+
/**
|
|
987
|
+
Returns queue item WITHOUT the updated fetch state. Used for checking values
|
|
988
|
+
not updating state
|
|
989
|
+
*/
|
|
990
|
+
let getEarliestEventInPartition = (p: partition) => {
|
|
991
|
+
switch p.fetchedEventQueue->Utils.Array.last {
|
|
992
|
+
| Some(head) =>
|
|
993
|
+
Item({item: head, popItemOffQueue: () => p.fetchedEventQueue->Js.Array2.pop->ignore})
|
|
994
|
+
| None => makeNoItem(p)
|
|
995
|
+
}
|
|
996
|
+
}
|
|
997
|
+
|
|
998
|
+
/**
|
|
999
|
+
Gets the earliest queueItem across all partitions.
|
|
1000
|
+
|
|
1001
|
+
Finds the earliest queue item across all partitions and then returns that
|
|
1002
|
+
queue item with an updated fetch state.
|
|
1003
|
+
*/
|
|
1004
|
+
let getEarliestEvent = ({partitions}: t) => {
|
|
1005
|
+
let item = ref(partitions->Js.Array2.unsafe_get(0)->getEarliestEventInPartition)
|
|
1006
|
+
for idx in 1 to partitions->Array.length - 1 {
|
|
1007
|
+
let p = partitions->Js.Array2.unsafe_get(idx)
|
|
1008
|
+
let pItem = p->getEarliestEventInPartition
|
|
1009
|
+
if pItem->qItemLt(item.contents) {
|
|
1010
|
+
item := pItem
|
|
1011
|
+
}
|
|
1012
|
+
}
|
|
1013
|
+
item.contents
|
|
1014
|
+
}
|
|
1015
|
+
|
|
1016
|
+
/**
|
|
1017
|
+
Instantiates a fetch state with partitions for initial addresses
|
|
1018
|
+
*/
|
|
1019
|
+
let make = (
|
|
1020
|
+
~startBlock,
|
|
1021
|
+
~endBlock,
|
|
1022
|
+
~eventConfigs: array<Internal.eventConfig>,
|
|
1023
|
+
~staticContracts: dict<array<Address.t>>,
|
|
1024
|
+
~dynamicContracts: array<indexingContract>,
|
|
1025
|
+
~maxAddrInPartition,
|
|
1026
|
+
~chainId,
|
|
1027
|
+
~blockLag=?,
|
|
1028
|
+
): t => {
|
|
1029
|
+
let latestFetchedBlock = {
|
|
1030
|
+
blockTimestamp: 0,
|
|
1031
|
+
// Note: there's a bug here - startBlock: 1 won't work
|
|
1032
|
+
blockNumber: Pervasives.max(startBlock - 1, 0),
|
|
1033
|
+
}
|
|
1034
|
+
|
|
1035
|
+
let notDependingOnAddresses = []
|
|
1036
|
+
let normalEventConfigs = []
|
|
1037
|
+
let contractNamesWithNormalEvents = Utils.Set.make()
|
|
1038
|
+
let indexingContracts = Js.Dict.empty()
|
|
1039
|
+
let contractConfigs = Js.Dict.empty()
|
|
1040
|
+
|
|
1041
|
+
eventConfigs->Array.forEach(ec => {
|
|
1042
|
+
switch contractConfigs->Utils.Dict.dangerouslyGetNonOption(ec.contractName) {
|
|
1043
|
+
| Some({filterByAddresses}) =>
|
|
1044
|
+
contractConfigs->Js.Dict.set(
|
|
1045
|
+
ec.contractName,
|
|
1046
|
+
{filterByAddresses: filterByAddresses || ec.filterByAddresses},
|
|
1047
|
+
)
|
|
1048
|
+
| None =>
|
|
1049
|
+
contractConfigs->Js.Dict.set(ec.contractName, {filterByAddresses: ec.filterByAddresses})
|
|
1050
|
+
}
|
|
1051
|
+
|
|
1052
|
+
if ec.dependsOnAddresses {
|
|
1053
|
+
normalEventConfigs->Array.push(ec)
|
|
1054
|
+
contractNamesWithNormalEvents->Utils.Set.add(ec.contractName)->ignore
|
|
1055
|
+
} else {
|
|
1056
|
+
notDependingOnAddresses->Array.push(ec)
|
|
1057
|
+
}
|
|
1058
|
+
})
|
|
1059
|
+
|
|
1060
|
+
let partitions = []
|
|
1061
|
+
|
|
1062
|
+
if notDependingOnAddresses->Array.length > 0 {
|
|
1063
|
+
partitions->Array.push({
|
|
1064
|
+
id: partitions->Array.length->Int.toString,
|
|
1065
|
+
status: {
|
|
1066
|
+
fetchingStateId: None,
|
|
1067
|
+
},
|
|
1068
|
+
latestFetchedBlock,
|
|
1069
|
+
selection: {
|
|
1070
|
+
dependsOnAddresses: false,
|
|
1071
|
+
eventConfigs: notDependingOnAddresses,
|
|
1072
|
+
},
|
|
1073
|
+
addressesByContractName: Js.Dict.empty(),
|
|
1074
|
+
fetchedEventQueue: [],
|
|
1075
|
+
})
|
|
1076
|
+
}
|
|
1077
|
+
|
|
1078
|
+
let normalSelection = {
|
|
1079
|
+
dependsOnAddresses: true,
|
|
1080
|
+
eventConfigs: normalEventConfigs,
|
|
1081
|
+
}
|
|
1082
|
+
|
|
1083
|
+
switch normalEventConfigs {
|
|
1084
|
+
| [] => ()
|
|
1085
|
+
| _ => {
|
|
1086
|
+
let makePendingNormalPartition = () => {
|
|
1087
|
+
{
|
|
1088
|
+
id: partitions->Array.length->Int.toString,
|
|
1089
|
+
status: {
|
|
1090
|
+
fetchingStateId: None,
|
|
1091
|
+
},
|
|
1092
|
+
latestFetchedBlock,
|
|
1093
|
+
selection: normalSelection,
|
|
1094
|
+
addressesByContractName: Js.Dict.empty(),
|
|
1095
|
+
fetchedEventQueue: [],
|
|
1096
|
+
}
|
|
1097
|
+
}
|
|
1098
|
+
|
|
1099
|
+
let pendingNormalPartition = ref(makePendingNormalPartition())
|
|
1100
|
+
|
|
1101
|
+
let registerAddress = (contractName, address, ~dc: option<indexingContract>=?) => {
|
|
1102
|
+
let pendingPartition = pendingNormalPartition.contents
|
|
1103
|
+
switch pendingPartition.addressesByContractName->Utils.Dict.dangerouslyGetNonOption(
|
|
1104
|
+
contractName,
|
|
1105
|
+
) {
|
|
1106
|
+
| Some(addresses) => addresses->Array.push(address)
|
|
1107
|
+
| None => pendingPartition.addressesByContractName->Js.Dict.set(contractName, [address])
|
|
1108
|
+
}
|
|
1109
|
+
indexingContracts->Js.Dict.set(
|
|
1110
|
+
address->Address.toString,
|
|
1111
|
+
switch dc {
|
|
1112
|
+
| Some(dc) => dc
|
|
1113
|
+
| None => {
|
|
1114
|
+
address,
|
|
1115
|
+
contractName,
|
|
1116
|
+
startBlock,
|
|
1117
|
+
register: Config,
|
|
1118
|
+
}
|
|
1119
|
+
},
|
|
1120
|
+
)
|
|
1121
|
+
if (
|
|
1122
|
+
pendingPartition.addressesByContractName->addressesByContractNameCount ===
|
|
1123
|
+
maxAddrInPartition
|
|
1124
|
+
) {
|
|
1125
|
+
partitions->Array.push(pendingPartition)
|
|
1126
|
+
pendingNormalPartition := makePendingNormalPartition()
|
|
1127
|
+
}
|
|
1128
|
+
}
|
|
1129
|
+
|
|
1130
|
+
staticContracts
|
|
1131
|
+
->Js.Dict.entries
|
|
1132
|
+
->Array.forEach(((contractName, addresses)) => {
|
|
1133
|
+
if contractNamesWithNormalEvents->Utils.Set.has(contractName) {
|
|
1134
|
+
addresses->Array.forEach(a => {
|
|
1135
|
+
registerAddress(contractName, a)
|
|
1136
|
+
})
|
|
1137
|
+
}
|
|
1138
|
+
})
|
|
1139
|
+
|
|
1140
|
+
dynamicContracts->Array.forEach(dc => {
|
|
1141
|
+
let contractName = dc.contractName
|
|
1142
|
+
if contractNamesWithNormalEvents->Utils.Set.has(contractName) {
|
|
1143
|
+
registerAddress(contractName, dc.address, ~dc)
|
|
1144
|
+
}
|
|
1145
|
+
})
|
|
1146
|
+
|
|
1147
|
+
if pendingNormalPartition.contents.addressesByContractName->addressesByContractNameCount > 0 {
|
|
1148
|
+
partitions->Array.push(pendingNormalPartition.contents)
|
|
1149
|
+
}
|
|
1150
|
+
}
|
|
1151
|
+
}
|
|
1152
|
+
|
|
1153
|
+
if partitions->Array.length === 0 {
|
|
1154
|
+
Js.Exn.raiseError(
|
|
1155
|
+
"Invalid configuration: Nothing to fetch. Make sure that you provided at least one contract address to index, or have events with Wildcard mode enabled.",
|
|
1156
|
+
)
|
|
1157
|
+
}
|
|
1158
|
+
|
|
1159
|
+
let numAddresses = indexingContracts->Js.Dict.keys->Array.length
|
|
1160
|
+
Prometheus.IndexingAddresses.set(~addressesCount=numAddresses, ~chainId)
|
|
1161
|
+
Prometheus.IndexingPartitions.set(~partitionsCount=partitions->Array.length, ~chainId)
|
|
1162
|
+
Prometheus.IndexingBufferSize.set(~bufferSize=0, ~chainId)
|
|
1163
|
+
Prometheus.IndexingBufferBlockNumber.set(~blockNumber=latestFetchedBlock.blockNumber, ~chainId)
|
|
1164
|
+
switch endBlock {
|
|
1165
|
+
| Some(endBlock) => Prometheus.IndexingEndBlock.set(~endBlock, ~chainId)
|
|
1166
|
+
| None => ()
|
|
1167
|
+
}
|
|
1168
|
+
|
|
1169
|
+
{
|
|
1170
|
+
partitions,
|
|
1171
|
+
nextPartitionIndex: partitions->Array.length,
|
|
1172
|
+
contractConfigs,
|
|
1173
|
+
isFetchingAtHead: false,
|
|
1174
|
+
maxAddrInPartition,
|
|
1175
|
+
chainId,
|
|
1176
|
+
endBlock,
|
|
1177
|
+
latestFullyFetchedBlock: latestFetchedBlock,
|
|
1178
|
+
queueSize: 0,
|
|
1179
|
+
firstEventBlockNumber: None,
|
|
1180
|
+
normalSelection,
|
|
1181
|
+
indexingContracts,
|
|
1182
|
+
dcsToStore: None,
|
|
1183
|
+
blockLag,
|
|
1184
|
+
}
|
|
1185
|
+
}
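// Minimal usage sketch (hypothetical values, not part of the released code):
// FetchState.make(~startBlock=0, ~endBlock=None, ~eventConfigs, ~staticContracts,
//   ~dynamicContracts=[], ~maxAddrInPartition=500, ~chainId=1)
// creates one partition per maxAddrInPartition-sized group of addresses, plus a dedicated
// partition for event configs that don't depend on addresses (wildcard mode).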
|
|
1186
|
+
|
|
1187
|
+
let queueSize = ({queueSize}: t) => queueSize
|
|
1188
|
+
|
|
1189
|
+
/**
|
|
1190
|
+
* Returns the latest block number fetched for the lowest fetcher queue (i.e. the earliest un-fetched dynamic contract)
|
|
1191
|
+
*/
|
|
1192
|
+
let getLatestFullyFetchedBlock = ({latestFullyFetchedBlock}: t) => latestFullyFetchedBlock
|
|
1193
|
+
|
|
1194
|
+
let pruneQueueFromFirstChangeEvent = (
|
|
1195
|
+
queue: array<Internal.eventItem>,
|
|
1196
|
+
~firstChangeEvent: blockNumberAndLogIndex,
|
|
1197
|
+
) => {
|
|
1198
|
+
queue->Array.keep(item =>
|
|
1199
|
+
(item.blockNumber, item.logIndex) < (firstChangeEvent.blockNumber, firstChangeEvent.logIndex)
|
|
1200
|
+
)
|
|
1201
|
+
}
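// Illustrative: with firstChangeEvent = {blockNumber: 100, logIndex: 2}, items at (99, 7) and
// (100, 1) are kept while (100, 2) and (101, 0) are pruned; the tuple comparison is
// lexicographic, matching the (blockNumber, logIndex) event ordering.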
|
|
1202
|
+
|
|
1203
|
+
/**
|
|
1204
|
+
Rolls back partitions to the given valid block
|
|
1205
|
+
*/
|
|
1206
|
+
let rollbackPartition = (
|
|
1207
|
+
p: partition,
|
|
1208
|
+
~firstChangeEvent: blockNumberAndLogIndex,
|
|
1209
|
+
~addressesToRemove,
|
|
1210
|
+
) => {
|
|
1211
|
+
switch p {
|
|
1212
|
+
| {selection: {dependsOnAddresses: false}} =>
|
|
1213
|
+
Some({
|
|
1214
|
+
...p,
|
|
1215
|
+
status: {
|
|
1216
|
+
fetchingStateId: None,
|
|
1217
|
+
},
|
|
1218
|
+
})
|
|
1219
|
+
| {addressesByContractName} =>
|
|
1220
|
+
let rollbackedAddressesByContractName = Js.Dict.empty()
|
|
1221
|
+
addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => {
|
|
1222
|
+
let keptAddresses =
|
|
1223
|
+
addresses->Array.keep(address => !(addressesToRemove->Utils.Set.has(address)))
|
|
1224
|
+
if keptAddresses->Array.length > 0 {
|
|
1225
|
+
rollbackedAddressesByContractName->Js.Dict.set(contractName, keptAddresses)
|
|
1226
|
+
}
|
|
1227
|
+
})
|
|
1228
|
+
|
|
1229
|
+
if rollbackedAddressesByContractName->Js.Dict.keys->Array.length === 0 {
|
|
1230
|
+
None
|
|
1231
|
+
} else {
|
|
1232
|
+
let shouldRollbackFetched = p.latestFetchedBlock.blockNumber >= firstChangeEvent.blockNumber
|
|
1233
|
+
|
|
1234
|
+
let fetchedEventQueue = if shouldRollbackFetched {
|
|
1235
|
+
p.fetchedEventQueue->pruneQueueFromFirstChangeEvent(~firstChangeEvent)
|
|
1236
|
+
} else {
|
|
1237
|
+
p.fetchedEventQueue
|
|
1238
|
+
}
|
|
1239
|
+
|
|
1240
|
+
Some({
|
|
1241
|
+
id: p.id,
|
|
1242
|
+
selection: p.selection,
|
|
1243
|
+
status: {
|
|
1244
|
+
fetchingStateId: None,
|
|
1245
|
+
},
|
|
1246
|
+
addressesByContractName: rollbackedAddressesByContractName,
|
|
1247
|
+
fetchedEventQueue,
|
|
1248
|
+
latestFetchedBlock: shouldRollbackFetched
|
|
1249
|
+
? {
|
|
1250
|
+
blockNumber: Pervasives.max(firstChangeEvent.blockNumber - 1, 0),
|
|
1251
|
+
blockTimestamp: 0,
|
|
1252
|
+
}
|
|
1253
|
+
: p.latestFetchedBlock,
|
|
1254
|
+
})
|
|
1255
|
+
}
|
|
1256
|
+
}
|
|
1257
|
+
}
|
|
1258
|
+
|
|
1259
|
+
let rollback = (fetchState: t, ~firstChangeEvent) => {
|
|
1260
|
+
let addressesToRemove = Utils.Set.make()
|
|
1261
|
+
let indexingContracts = Js.Dict.empty()
|
|
1262
|
+
|
|
1263
|
+
fetchState.indexingContracts
|
|
1264
|
+
->Js.Dict.keys
|
|
1265
|
+
->Array.forEach(address => {
|
|
1266
|
+
let indexingContract = fetchState.indexingContracts->Js.Dict.unsafeGet(address)
|
|
1267
|
+
if (
|
|
1268
|
+
switch indexingContract {
|
|
1269
|
+
| {register: Config} => true
|
|
1270
|
+
| {register: DC(dc)} =>
|
|
1271
|
+
indexingContract.startBlock < firstChangeEvent.blockNumber ||
|
|
1272
|
+
(indexingContract.startBlock === firstChangeEvent.blockNumber &&
|
|
1273
|
+
dc.registeringEventLogIndex < firstChangeEvent.logIndex)
|
|
1274
|
+
}
|
|
1275
|
+
) {
|
|
1276
|
+
indexingContracts->Js.Dict.set(address, indexingContract)
|
|
1277
|
+
} else {
|
|
1278
|
+
//If the registration block is later than the first change event,
|
|
1279
|
+
//Do not keep it and add to the removed addresses
|
|
1280
|
+
let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString)
|
|
1281
|
+
}
|
|
1282
|
+
})
|
|
1283
|
+
|
|
1284
|
+
let partitions =
|
|
1285
|
+
fetchState.partitions->Array.keepMap(p =>
|
|
1286
|
+
p->rollbackPartition(~firstChangeEvent, ~addressesToRemove)
|
|
1287
|
+
)
|
|
1288
|
+
|
|
1289
|
+
fetchState->updateInternal(
|
|
1290
|
+
~partitions,
|
|
1291
|
+
~indexingContracts,
|
|
1292
|
+
~dcsToStore=switch fetchState.dcsToStore {
|
|
1293
|
+
| Some(dcsToStore) =>
|
|
1294
|
+
let filtered =
|
|
1295
|
+
dcsToStore->Js.Array2.filter(dc => !(addressesToRemove->Utils.Set.has(dc.address)))
|
|
1296
|
+
switch filtered {
|
|
1297
|
+
| [] => None
|
|
1298
|
+
| _ => Some(filtered)
|
|
1299
|
+
}
|
|
1300
|
+
| None => None
|
|
1301
|
+
},
|
|
1302
|
+
)
|
|
1303
|
+
}
|
|
1304
|
+
|
|
1305
|
+
/**
|
|
1306
|
+
* Returns a boolean indicating whether the fetch state is actively indexing
|
|
1307
|
+
* used for comparing event queues in the chain manager
|
|
1308
|
+
*/
|
|
1309
|
+
let isActivelyIndexing = ({latestFullyFetchedBlock, endBlock} as fetchState: t) => {
|
|
1310
|
+
switch endBlock {
|
|
1311
|
+
| Some(endBlock) =>
|
|
1312
|
+
let isPastEndblock = latestFullyFetchedBlock.blockNumber >= endBlock
|
|
1313
|
+
if isPastEndblock {
|
|
1314
|
+
fetchState->queueSize > 0
|
|
1315
|
+
} else {
|
|
1316
|
+
true
|
|
1317
|
+
}
|
|
1318
|
+
| None => true
|
|
1319
|
+
}
|
|
1320
|
+
}
|