envio 3.0.0-alpha.2 → 3.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/evm.schema.json +44 -33
  2. package/fuel.schema.json +32 -21
  3. package/index.d.ts +1 -0
  4. package/package.json +7 -6
  5. package/src/Batch.res.mjs +1 -1
  6. package/src/Benchmark.res +394 -0
  7. package/src/Benchmark.res.mjs +398 -0
  8. package/src/ChainFetcher.res +459 -0
  9. package/src/ChainFetcher.res.mjs +281 -0
  10. package/src/ChainManager.res +179 -0
  11. package/src/ChainManager.res.mjs +139 -0
  12. package/src/Config.res +15 -1
  13. package/src/Config.res.mjs +27 -4
  14. package/src/Ecosystem.res +9 -124
  15. package/src/Ecosystem.res.mjs +19 -160
  16. package/src/Env.res +0 -1
  17. package/src/Env.res.mjs +0 -3
  18. package/src/Envio.gen.ts +9 -1
  19. package/src/Envio.res +12 -9
  20. package/src/EventProcessing.res +476 -0
  21. package/src/EventProcessing.res.mjs +341 -0
  22. package/src/FetchState.res +54 -29
  23. package/src/FetchState.res.mjs +62 -35
  24. package/src/GlobalState.res +1169 -0
  25. package/src/GlobalState.res.mjs +1196 -0
  26. package/src/Internal.res +2 -1
  27. package/src/LoadLayer.res +444 -0
  28. package/src/LoadLayer.res.mjs +296 -0
  29. package/src/LoadLayer.resi +32 -0
  30. package/src/Prometheus.res +8 -8
  31. package/src/Prometheus.res.mjs +10 -10
  32. package/src/ReorgDetection.res +6 -10
  33. package/src/ReorgDetection.res.mjs +6 -6
  34. package/src/UserContext.res +356 -0
  35. package/src/UserContext.res.mjs +238 -0
  36. package/src/bindings/DateFns.res +71 -0
  37. package/src/bindings/DateFns.res.mjs +22 -0
  38. package/src/sources/Evm.res +87 -0
  39. package/src/sources/Evm.res.mjs +105 -0
  40. package/src/sources/EvmChain.res +95 -0
  41. package/src/sources/EvmChain.res.mjs +61 -0
  42. package/src/sources/Fuel.res +19 -34
  43. package/src/sources/Fuel.res.mjs +34 -16
  44. package/src/sources/FuelSDK.res +37 -0
  45. package/src/sources/FuelSDK.res.mjs +29 -0
  46. package/src/sources/HyperFuel.res +2 -2
  47. package/src/sources/HyperFuel.resi +1 -1
  48. package/src/sources/HyperFuelClient.res +2 -2
  49. package/src/sources/HyperFuelSource.res +8 -8
  50. package/src/sources/HyperFuelSource.res.mjs +5 -5
  51. package/src/sources/HyperSyncSource.res +5 -5
  52. package/src/sources/HyperSyncSource.res.mjs +5 -5
  53. package/src/sources/RpcSource.res +4 -4
  54. package/src/sources/RpcSource.res.mjs +3 -3
  55. package/src/sources/Solana.res +59 -0
  56. package/src/sources/Solana.res.mjs +79 -0
  57. package/src/sources/Source.res +2 -2
  58. package/src/sources/SourceManager.res +24 -32
  59. package/src/sources/SourceManager.res.mjs +20 -20
  60. package/src/sources/SourceManager.resi +4 -5
@@ -0,0 +1,1169 @@
1
+ open Belt
2
+
3
+ type chain = ChainMap.Chain.t
4
+ type rollbackState =
5
+ | NoRollback
6
+ | ReorgDetected({chain: chain, blockNumber: int})
7
+ | FindingReorgDepth
8
+ | FoundReorgDepth({chain: chain, rollbackTargetBlockNumber: int})
9
+ | RollbackReady({diffInMemoryStore: InMemoryStore.t, eventsProcessedDiffByChain: dict<int>})
10
+
11
+ module WriteThrottlers = {
12
+ type t = {
13
+ chainMetaData: Throttler.t,
14
+ pruneStaleEntityHistory: Throttler.t,
15
+ }
16
+ let make = (): t => {
17
+ let chainMetaData = {
18
+ let intervalMillis = Env.ThrottleWrites.chainMetadataIntervalMillis
19
+ let logger = Logging.createChild(
20
+ ~params={
21
+ "context": "Throttler for chain metadata writes",
22
+ "intervalMillis": intervalMillis,
23
+ },
24
+ )
25
+ Throttler.make(~intervalMillis, ~logger)
26
+ }
27
+
28
+ let pruneStaleEntityHistory = {
29
+ let intervalMillis = Env.ThrottleWrites.pruneStaleDataIntervalMillis
30
+ let logger = Logging.createChild(
31
+ ~params={
32
+ "context": "Throttler for pruning stale entity history data",
33
+ "intervalMillis": intervalMillis,
34
+ },
35
+ )
36
+ Throttler.make(~intervalMillis, ~logger)
37
+ }
38
+ {chainMetaData, pruneStaleEntityHistory}
39
+ }
40
+ }
41
+
42
+ type t = {
43
+ indexer: Indexer.t,
44
+ chainManager: ChainManager.t,
45
+ processedBatches: int,
46
+ currentlyProcessingBatch: bool,
47
+ rollbackState: rollbackState,
48
+ indexerStartTime: Js.Date.t,
49
+ writeThrottlers: WriteThrottlers.t,
50
+ loadManager: LoadManager.t,
51
+ keepProcessAlive: bool,
52
+ //Initialized as 0, increments, when rollbacks occur to invalidate
53
+ //responses based on the wrong stateId
54
+ id: int,
55
+ }
56
+
57
+ let make = (
58
+ ~indexer: Indexer.t,
59
+ ~chainManager: ChainManager.t,
60
+ ~isDevelopmentMode=false,
61
+ ~shouldUseTui=false,
62
+ ) => {
63
+ {
64
+ indexer,
65
+ currentlyProcessingBatch: false,
66
+ processedBatches: 0,
67
+ chainManager,
68
+ indexerStartTime: Js.Date.make(),
69
+ rollbackState: NoRollback,
70
+ writeThrottlers: WriteThrottlers.make(),
71
+ loadManager: LoadManager.make(),
72
+ keepProcessAlive: isDevelopmentMode || shouldUseTui,
73
+ id: 0,
74
+ }
75
+ }
76
+
77
+ let getId = self => self.id
78
+ let incrementId = self => {...self, id: self.id + 1}
79
+ let setChainManager = (self, chainManager) => {
80
+ ...self,
81
+ chainManager,
82
+ }
83
+
84
+ let isPreparingRollback = state =>
85
+ switch state.rollbackState {
86
+ | NoRollback
87
+ | // We already updated fetch states here
88
+ // so we treat it as not rolling back
89
+ RollbackReady(_) => false
90
+ | FindingReorgDepth
91
+ | ReorgDetected(_)
92
+ | FoundReorgDepth(_) => true
93
+ }
94
+
95
+ type partitionQueryResponse = {
96
+ chain: chain,
97
+ response: Source.blockRangeFetchResponse,
98
+ query: FetchState.query,
99
+ }
100
+
101
+ type shouldExit = ExitWithSuccess | NoExit
102
+
103
+ // Need to dispatch an action for every async operation
104
+ // to get access to the latest state.
105
+ type action =
106
+ // After a response is received, we validate it with the new state
107
+ // if there's no reorg to continue processing the response.
108
+ | ValidatePartitionQueryResponse(partitionQueryResponse)
109
+ // This should be a separate action from ValidatePartitionQueryResponse
110
+ // because when processing the response, there might be an async contract registration.
111
+ // So after it's finished we dispatch the submit action to get the latest fetch state.
112
+ | SubmitPartitionQueryResponse({
113
+ newItems: array<Internal.item>,
114
+ newItemsWithDcs: array<Internal.item>,
115
+ knownHeight: int,
116
+ latestFetchedBlock: FetchState.blockNumberAndTimestamp,
117
+ query: FetchState.query,
118
+ chain: chain,
119
+ })
120
+ | FinishWaitingForNewBlock({chain: chain, knownHeight: int})
121
+ | EventBatchProcessed({batch: Batch.t})
122
+ | StartProcessingBatch
123
+ | StartFindingReorgDepth
124
+ | FindReorgDepth({chain: chain, rollbackTargetBlockNumber: int})
125
+ | EnterReorgThreshold
126
+ | UpdateQueues({
127
+ progressedChainsById: dict<Batch.chainAfterBatch>,
128
+ // Needed to prevent overwriting the blockLag
129
+ // set by EnterReorgThreshold
130
+ shouldEnterReorgThreshold: bool,
131
+ })
132
+ | SuccessExit
133
+ | ErrorExit(ErrorHandling.t)
134
+ | SetRollbackState({
135
+ diffInMemoryStore: InMemoryStore.t,
136
+ rollbackedChainManager: ChainManager.t,
137
+ eventsProcessedDiffByChain: dict<int>,
138
+ })
139
+
140
+ type queryChain = CheckAllChains | Chain(chain)
141
+ type task =
142
+ | NextQuery(queryChain)
143
+ | ProcessPartitionQueryResponse(partitionQueryResponse)
144
+ | ProcessEventBatch
145
+ | UpdateChainMetaDataAndCheckForExit(shouldExit)
146
+ | Rollback
147
+ | PruneStaleEntityHistory
148
+
149
+ let updateChainMetadataTable = (
150
+ cm: ChainManager.t,
151
+ ~persistence: Persistence.t,
152
+ ~throttler: Throttler.t,
153
+ ) => {
154
+ let chainsData: dict<InternalTable.Chains.metaFields> = Js.Dict.empty()
155
+
156
+ cm.chainFetchers
157
+ ->ChainMap.values
158
+ ->Belt.Array.forEach(cf => {
159
+ chainsData->Js.Dict.set(
160
+ cf.chainConfig.id->Belt.Int.toString,
161
+ {
162
+ blockHeight: cf.fetchState.knownHeight,
163
+ firstEventBlockNumber: cf.firstEventBlockNumber->Js.Null.fromOption,
164
+ isHyperSync: (cf.sourceManager->SourceManager.getActiveSource).poweredByHyperSync,
165
+ latestFetchedBlockNumber: cf.fetchState->FetchState.bufferBlockNumber,
166
+ timestampCaughtUpToHeadOrEndblock: cf.timestampCaughtUpToHeadOrEndblock->Js.Null.fromOption,
167
+ numBatchesFetched: cf.numBatchesFetched,
168
+ },
169
+ )
170
+ })
171
+
172
+ //Don't await this set, it can happen in its own time
173
+ throttler->Throttler.schedule(() =>
174
+ persistence.storage.setChainMeta(chainsData)->Promise.ignoreValue
175
+ )
176
+ }
177
+
178
+ /**
179
+ Takes in a chain manager and sets all chains timestamp caught up to head
180
+ when valid state lines up and returns an updated chain manager
181
+ */
182
+ let updateProgressedChains = (
183
+ chainManager: ChainManager.t,
184
+ ~batch: Batch.t,
185
+ ~indexer: Indexer.t,
186
+ ) => {
187
+ Prometheus.ProgressBatchCount.increment()
188
+
189
+ let nextQueueItemIsNone = chainManager->ChainManager.nextItemIsNone
190
+
191
+ let allChainsAtHead = chainManager->ChainManager.isProgressAtHead
192
+ //Update the timestampCaughtUpToHeadOrEndblock values
193
+ let chainFetchers = chainManager.chainFetchers->ChainMap.map(cf => {
194
+ let chain = ChainMap.Chain.makeUnsafe(~chainId=cf.chainConfig.id)
195
+
196
+ let maybeChainAfterBatch =
197
+ batch.progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption(
198
+ chain->ChainMap.Chain.toChainId,
199
+ )
200
+
201
+ let cf = switch maybeChainAfterBatch {
202
+ | Some(chainAfterBatch) => {
203
+ if cf.committedProgressBlockNumber !== chainAfterBatch.progressBlockNumber {
204
+ Prometheus.ProgressBlockNumber.set(
205
+ ~blockNumber=chainAfterBatch.progressBlockNumber,
206
+ ~chainId=chain->ChainMap.Chain.toChainId,
207
+ )
208
+ }
209
+ if cf.numEventsProcessed !== chainAfterBatch.totalEventsProcessed {
210
+ Prometheus.ProgressEventsCount.set(
211
+ ~processedCount=chainAfterBatch.totalEventsProcessed,
212
+ ~chainId=chain->ChainMap.Chain.toChainId,
213
+ )
214
+ }
215
+
216
+ // Calculate and set latency metrics
217
+ switch batch->Batch.findLastEventItem(~chainId=chain->ChainMap.Chain.toChainId) {
218
+ | Some(eventItem) => {
219
+ let blockTimestamp = eventItem.event.block->indexer.config.ecosystem.getTimestamp
220
+ let currentTimeMs = Js.Date.now()->Float.toInt
221
+ let blockTimestampMs = blockTimestamp * 1000
222
+ let latencyMs = currentTimeMs - blockTimestampMs
223
+
224
+ Prometheus.ProgressLatency.set(~latencyMs, ~chainId=chain->ChainMap.Chain.toChainId)
225
+ }
226
+ | None => ()
227
+ }
228
+
229
+ {
230
+ ...cf,
231
+ // Since we process per chain always in order,
232
+ // we need to calculate it once, by using the first item in a batch
233
+ firstEventBlockNumber: switch cf.firstEventBlockNumber {
234
+ | Some(_) => cf.firstEventBlockNumber
235
+ | None => batch->Batch.findFirstEventBlockNumber(~chainId=chain->ChainMap.Chain.toChainId)
236
+ },
237
+ committedProgressBlockNumber: chainAfterBatch.progressBlockNumber,
238
+ numEventsProcessed: chainAfterBatch.totalEventsProcessed,
239
+ isProgressAtHead: cf.isProgressAtHead || chainAfterBatch.isProgressAtHeadWhenBatchCreated,
240
+ safeCheckpointTracking: switch cf.safeCheckpointTracking {
241
+ | Some(safeCheckpointTracking) =>
242
+ Some(
243
+ safeCheckpointTracking->SafeCheckpointTracking.updateOnNewBatch(
244
+ ~sourceBlockNumber=cf.fetchState.knownHeight,
245
+ ~chainId=chain->ChainMap.Chain.toChainId,
246
+ ~batchCheckpointIds=batch.checkpointIds,
247
+ ~batchCheckpointBlockNumbers=batch.checkpointBlockNumbers,
248
+ ~batchCheckpointChainIds=batch.checkpointChainIds,
249
+ ),
250
+ )
251
+ | None => None
252
+ },
253
+ }
254
+ }
255
+ | None => cf
256
+ }
257
+
258
+ /* strategy for TUI synced status:
259
+ * Firstly -> only update synced status after batch is processed (not on batch creation). But also set when a batch tries to be created and there is no batch
260
+ *
261
+ * Secondly -> reset timestampCaughtUpToHead and isFetching at head when dynamic contracts get registered to a chain if they are not within 0.001 percent of the current block height
262
+ *
263
+ * New conditions for valid synced:
264
+ *
265
+ * CASE 1 (chains are being synchronised at the head)
266
+ *
267
+ * All chain fetchers are fetching at the head AND
268
+ * No events that can be processed on the queue (even if events still exist on the individual queues)
269
+ * CASE 2 (chain finishes earlier than any other chain)
270
+ *
271
+ * CASE 3 endblock has been reached and latest processed block is greater than or equal to endblock (both fields must be Some)
272
+ *
273
+ * The given chain fetcher is fetching at the head or latest processed block >= endblock
274
+ * The given chain has processed all events on the queue
275
+ * see https://github.com/Float-Capital/indexer/pull/1388 */
276
+ if cf->ChainFetcher.hasProcessedToEndblock {
277
+ // in the case this is already set, don't reset and instead propagate the existing value
278
+ let timestampCaughtUpToHeadOrEndblock =
279
+ cf.timestampCaughtUpToHeadOrEndblock->Option.isSome
280
+ ? cf.timestampCaughtUpToHeadOrEndblock
281
+ : Js.Date.make()->Some
282
+ {
283
+ ...cf,
284
+ timestampCaughtUpToHeadOrEndblock,
285
+ }
286
+ } else if cf.timestampCaughtUpToHeadOrEndblock->Option.isNone && cf.isProgressAtHead {
287
+ //Only calculate and set timestampCaughtUpToHeadOrEndblock if chain fetcher is at the head and
288
+ //its not already set
289
+ //CASE1
290
+ //All chains are caught up to head chainManager queue returns None
291
+ //Meaning we are busy synchronizing chains at the head
292
+ if nextQueueItemIsNone && allChainsAtHead {
293
+ {
294
+ ...cf,
295
+ timestampCaughtUpToHeadOrEndblock: Js.Date.make()->Some,
296
+ }
297
+ } else {
298
+ //CASE2 -> Only calculate if case1 fails
299
+ //All events have been processed on the chain fetchers queue
300
+ //Other chains may be busy syncing
301
+ let hasNoMoreEventsToProcess = cf->ChainFetcher.hasNoMoreEventsToProcess
302
+
303
+ if hasNoMoreEventsToProcess {
304
+ {
305
+ ...cf,
306
+ timestampCaughtUpToHeadOrEndblock: Js.Date.make()->Some,
307
+ }
308
+ } else {
309
+ //Default to just returning cf
310
+ cf
311
+ }
312
+ }
313
+ } else {
314
+ //Default to just returning cf
315
+ cf
316
+ }
317
+ })
318
+
319
+ let allChainsSyncedAtHead =
320
+ chainFetchers
321
+ ->ChainMap.values
322
+ ->Array.every(cf => cf.timestampCaughtUpToHeadOrEndblock->Option.isSome)
323
+
324
+ if allChainsSyncedAtHead {
325
+ Prometheus.setAllChainsSyncedToHead()
326
+ }
327
+
328
+ {
329
+ ...chainManager,
330
+ committedCheckpointId: switch batch.checkpointIds->Utils.Array.last {
331
+ | Some(checkpointId) => checkpointId
332
+ | None => chainManager.committedCheckpointId
333
+ },
334
+ chainFetchers,
335
+ }
336
+ }
337
+
338
+ let validatePartitionQueryResponse = (
339
+ state,
340
+ {chain, response, query} as partitionQueryResponse: partitionQueryResponse,
341
+ ) => {
342
+ let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain)
343
+ let {
344
+ parsedQueueItems,
345
+ latestFetchedBlockNumber,
346
+ stats,
347
+ knownHeight,
348
+ reorgGuard,
349
+ fromBlockQueried,
350
+ } = response
351
+
352
+ if knownHeight > chainFetcher.fetchState.knownHeight {
353
+ Prometheus.SourceHeight.set(
354
+ ~blockNumber=knownHeight,
355
+ ~chainId=chainFetcher.chainConfig.id,
356
+ // The knownHeight from response won't necessarily
357
+ // belong to the currently active source.
358
+ // But for simplicity, assume it does.
359
+ ~sourceName=(chainFetcher.sourceManager->SourceManager.getActiveSource).name,
360
+ )
361
+ }
362
+
363
+ if Env.Benchmark.shouldSaveData {
364
+ Benchmark.addBlockRangeFetched(
365
+ ~totalTimeElapsed=stats.totalTimeElapsed,
366
+ ~parsingTimeElapsed=stats.parsingTimeElapsed->Belt.Option.getWithDefault(0),
367
+ ~pageFetchTime=stats.pageFetchTime->Belt.Option.getWithDefault(0),
368
+ ~chainId=chain->ChainMap.Chain.toChainId,
369
+ ~fromBlock=fromBlockQueried,
370
+ ~toBlock=latestFetchedBlockNumber,
371
+ ~numEvents=parsedQueueItems->Array.length,
372
+ ~numAddresses=query.addressesByContractName->FetchState.addressesByContractNameCount,
373
+ ~queryName=switch query {
374
+ | {target: Merge(_)} => `Merge Query`
375
+ | {selection: {dependsOnAddresses: false}} => `Wildcard Query`
376
+ | {selection: {dependsOnAddresses: true}} => `Normal Query`
377
+ },
378
+ )
379
+ }
380
+
381
+ let (updatedReorgDetection, reorgResult: ReorgDetection.reorgResult) =
382
+ chainFetcher.reorgDetection->ReorgDetection.registerReorgGuard(~reorgGuard, ~knownHeight)
383
+
384
+ let updatedChainFetcher = {
385
+ ...chainFetcher,
386
+ reorgDetection: updatedReorgDetection,
387
+ }
388
+
389
+ let nextState = {
390
+ ...state,
391
+ chainManager: {
392
+ ...state.chainManager,
393
+ chainFetchers: state.chainManager.chainFetchers->ChainMap.set(chain, updatedChainFetcher),
394
+ },
395
+ }
396
+
397
+ let rollbackWithReorgDetectedBlockNumber = switch reorgResult {
398
+ | ReorgDetected(reorgDetected) => {
399
+ chainFetcher.logger->Logging.childInfo(
400
+ reorgDetected->ReorgDetection.reorgDetectedToLogParams(
401
+ ~shouldRollbackOnReorg=state.indexer.config.shouldRollbackOnReorg,
402
+ ),
403
+ )
404
+ Prometheus.ReorgCount.increment(~chain)
405
+ Prometheus.ReorgDetectionBlockNumber.set(
406
+ ~blockNumber=reorgDetected.scannedBlock.blockNumber,
407
+ ~chain,
408
+ )
409
+ if state.indexer.config.shouldRollbackOnReorg {
410
+ Some(reorgDetected.scannedBlock.blockNumber)
411
+ } else {
412
+ None
413
+ }
414
+ }
415
+ | NoReorg => None
416
+ }
417
+
418
+ switch rollbackWithReorgDetectedBlockNumber {
419
+ | None => (nextState, [ProcessPartitionQueryResponse(partitionQueryResponse)])
420
+ | Some(reorgDetectedBlockNumber) => {
421
+ let chainManager = switch state.rollbackState {
422
+ | RollbackReady({eventsProcessedDiffByChain}) => {
423
+ ...state.chainManager,
424
+ chainFetchers: state.chainManager.chainFetchers->ChainMap.update(chain, chainFetcher => {
425
+ switch eventsProcessedDiffByChain->Utils.Dict.dangerouslyGetByIntNonOption(
426
+ chain->ChainMap.Chain.toChainId,
427
+ ) {
428
+ | Some(eventsProcessedDiff) => {
429
+ ...chainFetcher,
430
+ // Since we detected a reorg, until rollback wasn't completed in the db
431
+ // We return the events processed counter to the pre-rollback value,
432
+ // to decrease it once more for the new rollback.
433
+ numEventsProcessed: chainFetcher.numEventsProcessed + eventsProcessedDiff,
434
+ }
435
+ | None => chainFetcher
436
+ }
437
+ }),
438
+ }
439
+ | _ => state.chainManager
440
+ }
441
+ (
442
+ {
443
+ ...nextState->incrementId,
444
+ chainManager,
445
+ rollbackState: ReorgDetected({
446
+ chain,
447
+ blockNumber: reorgDetectedBlockNumber,
448
+ }),
449
+ },
450
+ [Rollback],
451
+ )
452
+ }
453
+ }
454
+ }
455
+
456
+ let submitPartitionQueryResponse = (
457
+ state,
458
+ ~newItems,
459
+ ~newItemsWithDcs,
460
+ ~knownHeight,
461
+ ~latestFetchedBlock,
462
+ ~query,
463
+ ~chain,
464
+ ) => {
465
+ let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain)
466
+
467
+ let updatedChainFetcher =
468
+ chainFetcher
469
+ ->ChainFetcher.handleQueryResult(
470
+ ~query,
471
+ ~latestFetchedBlock,
472
+ ~newItems,
473
+ ~newItemsWithDcs,
474
+ ~knownHeight,
475
+ )
476
+ ->Utils.unwrapResultExn
477
+
478
+ let updatedChainFetcher = {
479
+ ...updatedChainFetcher,
480
+ numBatchesFetched: updatedChainFetcher.numBatchesFetched + 1,
481
+ }
482
+
483
+ if !chainFetcher.isProgressAtHead && updatedChainFetcher.isProgressAtHead {
484
+ updatedChainFetcher.logger->Logging.childInfo("All events have been fetched")
485
+ }
486
+
487
+ let nextState = {
488
+ ...state,
489
+ chainManager: {
490
+ ...state.chainManager,
491
+ chainFetchers: state.chainManager.chainFetchers->ChainMap.set(chain, updatedChainFetcher),
492
+ },
493
+ }
494
+
495
+ (
496
+ nextState,
497
+ [UpdateChainMetaDataAndCheckForExit(NoExit), ProcessEventBatch, NextQuery(Chain(chain))],
498
+ )
499
+ }
500
+
501
+ let processPartitionQueryResponse = async (
502
+ state,
503
+ {chain, response, query}: partitionQueryResponse,
504
+ ~dispatchAction,
505
+ ) => {
506
+ let {
507
+ parsedQueueItems,
508
+ latestFetchedBlockNumber,
509
+ knownHeight,
510
+ latestFetchedBlockTimestamp,
511
+ } = response
512
+
513
+ let itemsWithContractRegister = []
514
+ let newItems = []
515
+
516
+ for idx in 0 to parsedQueueItems->Array.length - 1 {
517
+ let item = parsedQueueItems->Array.getUnsafe(idx)
518
+ let eventItem = item->Internal.castUnsafeEventItem
519
+ if eventItem.eventConfig.contractRegister !== None {
520
+ itemsWithContractRegister->Array.push(item)
521
+ }
522
+
523
+ // TODO: Don't really need to keep it in the queue
524
+ // when there's no handler (besides raw_events, processed counter, and dcsToStore consuming)
525
+ newItems->Array.push(item)
526
+ }
527
+
528
+ let newItemsWithDcs = switch itemsWithContractRegister {
529
+ | [] as empty => empty
530
+ | _ =>
531
+ await ChainFetcher.runContractRegistersOrThrow(
532
+ ~itemsWithContractRegister,
533
+ ~chain,
534
+ ~config=state.indexer.config,
535
+ )
536
+ }
537
+
538
+ dispatchAction(
539
+ SubmitPartitionQueryResponse({
540
+ newItems,
541
+ newItemsWithDcs,
542
+ knownHeight,
543
+ latestFetchedBlock: {
544
+ blockNumber: latestFetchedBlockNumber,
545
+ blockTimestamp: latestFetchedBlockTimestamp,
546
+ },
547
+ chain,
548
+ query,
549
+ }),
550
+ )
551
+ }
552
+
553
+ let updateChainFetcher = (chainFetcherUpdate, ~state, ~chain) => {
554
+ (
555
+ {
556
+ ...state,
557
+ chainManager: {
558
+ ...state.chainManager,
559
+ chainFetchers: state.chainManager.chainFetchers->ChainMap.update(chain, chainFetcherUpdate),
560
+ },
561
+ },
562
+ [],
563
+ )
564
+ }
565
+
566
+ let onEnterReorgThreshold = (~state: t) => {
567
+ Logging.info("Reorg threshold reached")
568
+ Prometheus.ReorgThreshold.set(~isInReorgThreshold=true)
569
+
570
+ let chainFetchers = state.chainManager.chainFetchers->ChainMap.map(chainFetcher => {
571
+ {
572
+ ...chainFetcher,
573
+ fetchState: chainFetcher.fetchState->FetchState.updateInternal(
574
+ ~blockLag=Env.indexingBlockLag->Option.getWithDefault(0),
575
+ ),
576
+ }
577
+ })
578
+
579
+ {
580
+ ...state,
581
+ chainManager: {
582
+ ...state.chainManager,
583
+ chainFetchers,
584
+ isInReorgThreshold: true,
585
+ },
586
+ }
587
+ }
588
+
589
+ let actionReducer = (state: t, action: action) => {
590
+ switch action {
591
+ | FinishWaitingForNewBlock({chain, knownHeight}) => {
592
+ let updatedChainFetchers = state.chainManager.chainFetchers->ChainMap.update(
593
+ chain,
594
+ chainFetcher => {
595
+ let updatedFetchState =
596
+ chainFetcher.fetchState->FetchState.updateKnownHeight(~knownHeight)
597
+ if updatedFetchState !== chainFetcher.fetchState {
598
+ {
599
+ ...chainFetcher,
600
+ fetchState: updatedFetchState,
601
+ }
602
+ } else {
603
+ chainFetcher
604
+ }
605
+ },
606
+ )
607
+
608
+ let isBelowReorgThreshold =
609
+ !state.chainManager.isInReorgThreshold && state.indexer.config.shouldRollbackOnReorg
610
+ let shouldEnterReorgThreshold =
611
+ isBelowReorgThreshold &&
612
+ updatedChainFetchers
613
+ ->ChainMap.values
614
+ ->Array.every(chainFetcher => {
615
+ chainFetcher.fetchState->FetchState.isReadyToEnterReorgThreshold
616
+ })
617
+
618
+ let state = {
619
+ ...state,
620
+ chainManager: {
621
+ ...state.chainManager,
622
+ chainFetchers: updatedChainFetchers,
623
+ },
624
+ }
625
+
626
+ // Attempt ProcessEventBatch in case if we have block handlers to run
627
+ if shouldEnterReorgThreshold {
628
+ (onEnterReorgThreshold(~state), [NextQuery(CheckAllChains), ProcessEventBatch])
629
+ } else {
630
+ (state, [NextQuery(Chain(chain)), ProcessEventBatch])
631
+ }
632
+ }
633
+ | ValidatePartitionQueryResponse(partitionQueryResponse) =>
634
+ state->validatePartitionQueryResponse(partitionQueryResponse)
635
+ | SubmitPartitionQueryResponse({
636
+ newItems,
637
+ newItemsWithDcs,
638
+ knownHeight,
639
+ latestFetchedBlock,
640
+ query,
641
+ chain,
642
+ }) =>
643
+ state->submitPartitionQueryResponse(
644
+ ~newItems,
645
+ ~newItemsWithDcs,
646
+ ~knownHeight,
647
+ ~latestFetchedBlock,
648
+ ~query,
649
+ ~chain,
650
+ )
651
+ | EventBatchProcessed({batch}) =>
652
+ let maybePruneEntityHistory =
653
+ state.indexer.config->Config.shouldPruneHistory(
654
+ ~isInReorgThreshold=state.chainManager.isInReorgThreshold,
655
+ )
656
+ ? [PruneStaleEntityHistory]
657
+ : []
658
+
659
+ let state = {
660
+ ...state,
661
+ // Can safely reset rollback state, since overwrite is not possible.
662
+ // If rollback is pending, the EventBatchProcessed will be handled by the invalid action reducer instead.
663
+ rollbackState: NoRollback,
664
+ chainManager: state.chainManager->updateProgressedChains(~batch, ~indexer=state.indexer),
665
+ currentlyProcessingBatch: false,
666
+ processedBatches: state.processedBatches + 1,
667
+ }
668
+
669
+ let shouldExit = EventProcessing.allChainsEventsProcessedToEndblock(
670
+ state.chainManager.chainFetchers,
671
+ )
672
+ ? {
673
+ Logging.info("All chains are caught up to end blocks.")
674
+
675
+ // Keep the indexer process running when in development mode (for Dev Console)
676
+ // or when TUI is enabled (for display)
677
+ if state.keepProcessAlive {
678
+ NoExit
679
+ } else {
680
+ ExitWithSuccess
681
+ }
682
+ }
683
+ : NoExit
684
+
685
+ (
686
+ state,
687
+ [UpdateChainMetaDataAndCheckForExit(shouldExit), ProcessEventBatch]->Array.concat(
688
+ maybePruneEntityHistory,
689
+ ),
690
+ )
691
+
692
+ | StartProcessingBatch => ({...state, currentlyProcessingBatch: true}, [])
693
+ | StartFindingReorgDepth => ({...state, rollbackState: FindingReorgDepth}, [])
694
+ | FindReorgDepth({chain, rollbackTargetBlockNumber}) => (
695
+ {
696
+ ...state,
697
+ rollbackState: FoundReorgDepth({
698
+ chain,
699
+ rollbackTargetBlockNumber,
700
+ }),
701
+ },
702
+ [Rollback],
703
+ )
704
+ | EnterReorgThreshold => (onEnterReorgThreshold(~state), [NextQuery(CheckAllChains)])
705
+ | UpdateQueues({progressedChainsById, shouldEnterReorgThreshold}) =>
706
+ let chainFetchers = state.chainManager.chainFetchers->ChainMap.mapWithKey((chain, cf) => {
707
+ let fs = switch progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption(
708
+ chain->ChainMap.Chain.toChainId,
709
+ ) {
710
+ | Some(chainAfterBatch) => chainAfterBatch.fetchState
711
+ | None => cf.fetchState
712
+ }
713
+ {
714
+ ...cf,
715
+ fetchState: shouldEnterReorgThreshold
716
+ ? fs->FetchState.updateInternal(~blockLag=Env.indexingBlockLag->Option.getWithDefault(0))
717
+ : fs,
718
+ }
719
+ })
720
+
721
+ let chainManager = {
722
+ ...state.chainManager,
723
+ chainFetchers,
724
+ }
725
+
726
+ (
727
+ {
728
+ ...state,
729
+ chainManager,
730
+ },
731
+ [NextQuery(CheckAllChains)],
732
+ )
733
+ | SetRollbackState({diffInMemoryStore, rollbackedChainManager, eventsProcessedDiffByChain}) => (
734
+ {
735
+ ...state,
736
+ rollbackState: RollbackReady({
737
+ diffInMemoryStore,
738
+ eventsProcessedDiffByChain,
739
+ }),
740
+ chainManager: rollbackedChainManager,
741
+ },
742
+ [NextQuery(CheckAllChains), ProcessEventBatch],
743
+ )
744
+ | SuccessExit => {
745
+ Logging.info("Exiting with success")
746
+ NodeJs.process->NodeJs.exitWithCode(Success)
747
+ (state, [])
748
+ }
749
+ | ErrorExit(errHandler) =>
750
+ errHandler->ErrorHandling.log
751
+ NodeJs.process->NodeJs.exitWithCode(Failure)
752
+ (state, [])
753
+ }
754
+ }
755
+
756
+ let invalidatedActionReducer = (state: t, action: action) =>
757
+ switch action {
758
+ | EventBatchProcessed({batch}) if state->isPreparingRollback =>
759
+ Logging.info("Finished processing batch before rollback, actioning rollback")
760
+ (
761
+ {
762
+ ...state,
763
+ chainManager: state.chainManager->updateProgressedChains(~batch, ~indexer=state.indexer),
764
+ currentlyProcessingBatch: false,
765
+ processedBatches: state.processedBatches + 1,
766
+ },
767
+ [Rollback],
768
+ )
769
+ | ErrorExit(_) => actionReducer(state, action)
770
+ | _ =>
771
+ Logging.trace({
772
+ "msg": "Invalidated action discarded",
773
+ "action": action->S.convertOrThrow(Utils.Schema.variantTag),
774
+ })
775
+ (state, [])
776
+ }
777
+
778
+ let checkAndFetchForChain = (
779
+ //Used for dependency injection for tests
780
+ ~waitForNewBlock,
781
+ ~executeQuery,
782
+ //required args
783
+ ~state,
784
+ ~dispatchAction,
785
+ ) => async chain => {
786
+ let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain)
787
+ if !isPreparingRollback(state) {
788
+ let {fetchState} = chainFetcher
789
+
790
+ await chainFetcher.sourceManager->SourceManager.fetchNext(
791
+ ~fetchState,
792
+ ~waitForNewBlock=(~knownHeight) => chainFetcher.sourceManager->waitForNewBlock(~knownHeight),
793
+ ~onNewBlock=(~knownHeight) => dispatchAction(FinishWaitingForNewBlock({chain, knownHeight})),
794
+ ~executeQuery=async query => {
795
+ try {
796
+ let response =
797
+ await chainFetcher.sourceManager->executeQuery(
798
+ ~query,
799
+ ~knownHeight=fetchState.knownHeight,
800
+ )
801
+ dispatchAction(ValidatePartitionQueryResponse({chain, response, query}))
802
+ } catch {
803
+ | exn => dispatchAction(ErrorExit(exn->ErrorHandling.make))
804
+ }
805
+ },
806
+ ~stateId=state.id,
807
+ )
808
+ }
809
+ }
810
+
811
+ let injectedTaskReducer = (
812
+ //Used for dependency injection for tests
813
+ ~waitForNewBlock,
814
+ ~executeQuery,
815
+ ~getLastKnownValidBlock,
816
+ ) => async (
817
+ //required args
818
+ state: t,
819
+ task: task,
820
+ ~dispatchAction,
821
+ ) => {
822
+ switch task {
823
+ | ProcessPartitionQueryResponse(partitionQueryResponse) =>
824
+ state->processPartitionQueryResponse(partitionQueryResponse, ~dispatchAction)->Promise.done
825
+ | PruneStaleEntityHistory =>
826
+ let runPrune = async () => {
827
+ switch state.chainManager->ChainManager.getSafeCheckpointId {
828
+ | None => ()
829
+ | Some(safeCheckpointId) =>
830
+ await state.indexer.persistence.storage.pruneStaleCheckpoints(~safeCheckpointId)
831
+
832
+ for idx in 0 to state.indexer.persistence.allEntities->Array.length - 1 {
833
+ if idx !== 0 {
834
+ // Add some delay between entities
835
+ // To unblock the pg client if it's needed for something else
836
+ await Utils.delay(1000)
837
+ }
838
+ let entityConfig = state.indexer.persistence.allEntities->Array.getUnsafe(idx)
839
+ let timeRef = Hrtime.makeTimer()
840
+ try {
841
+ let () = await state.indexer.persistence.storage.pruneStaleEntityHistory(
842
+ ~entityName=entityConfig.name,
843
+ ~entityIndex=entityConfig.index,
844
+ ~safeCheckpointId,
845
+ )
846
+ } catch {
847
+ | exn =>
848
+ exn->ErrorHandling.mkLogAndRaise(
849
+ ~msg=`Failed to prune stale entity history`,
850
+ ~logger=Logging.createChild(
851
+ ~params={
852
+ "entityName": entityConfig.name,
853
+ "safeCheckpointId": safeCheckpointId,
854
+ },
855
+ ),
856
+ )
857
+ }
858
+ Prometheus.RollbackHistoryPrune.increment(
859
+ ~timeMillis=Hrtime.timeSince(timeRef)->Hrtime.toMillis,
860
+ ~entityName=entityConfig.name,
861
+ )
862
+ }
863
+ }
864
+ }
865
+ state.writeThrottlers.pruneStaleEntityHistory->Throttler.schedule(runPrune)
866
+
867
+ | UpdateChainMetaDataAndCheckForExit(shouldExit) =>
868
+ let {chainManager, writeThrottlers} = state
869
+ switch shouldExit {
870
+ | ExitWithSuccess =>
871
+ updateChainMetadataTable(
872
+ chainManager,
873
+ ~throttler=writeThrottlers.chainMetaData,
874
+ ~persistence=state.indexer.persistence,
875
+ )
876
+ dispatchAction(SuccessExit)
877
+ | NoExit =>
878
+ updateChainMetadataTable(
879
+ chainManager,
880
+ ~throttler=writeThrottlers.chainMetaData,
881
+ ~persistence=state.indexer.persistence,
882
+ )->ignore
883
+ }
884
+ | NextQuery(chainCheck) =>
885
+ let fetchForChain = checkAndFetchForChain(
886
+ ~waitForNewBlock,
887
+ ~executeQuery,
888
+ ~state,
889
+ ~dispatchAction,
890
+ )
891
+
892
+ switch chainCheck {
893
+ | Chain(chain) => await chain->fetchForChain
894
+ | CheckAllChains =>
895
+ //Mapping from the states chainManager so we can construct tests that don't use
896
+ //all chains
897
+ let _ =
898
+ await state.chainManager.chainFetchers
899
+ ->ChainMap.keys
900
+ ->Array.map(fetchForChain(_))
901
+ ->Promise.all
902
+ }
903
+ | ProcessEventBatch =>
904
+ if !state.currentlyProcessingBatch && !isPreparingRollback(state) {
905
+ //In the case of a rollback, use the provided in memory store
906
+ //With rolled back values
907
+ let rollbackInMemStore = switch state.rollbackState {
908
+ | RollbackReady({diffInMemoryStore}) => Some(diffInMemoryStore)
909
+ | _ => None
910
+ }
911
+
912
+ let batch =
913
+ state.chainManager->ChainManager.createBatch(
914
+ ~batchSizeTarget=state.indexer.config.batchSize,
915
+ ~isRollback=rollbackInMemStore !== None,
916
+ )
917
+
918
+ let progressedChainsById = batch.progressedChainsById
919
+ let totalBatchSize = batch.totalBatchSize
920
+
921
+ let isInReorgThreshold = state.chainManager.isInReorgThreshold
922
+ let shouldSaveHistory = state.indexer.config->Config.shouldSaveHistory(~isInReorgThreshold)
923
+
924
+ let isBelowReorgThreshold =
925
+ !state.chainManager.isInReorgThreshold && state.indexer.config.shouldRollbackOnReorg
926
+ let shouldEnterReorgThreshold =
927
+ isBelowReorgThreshold &&
928
+ state.chainManager.chainFetchers
929
+ ->ChainMap.values
930
+ ->Array.every(chainFetcher => {
931
+ let fetchState = switch progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption(
932
+ chainFetcher.fetchState.chainId,
933
+ ) {
934
+ | Some(chainAfterBatch) => chainAfterBatch.fetchState
935
+ | None => chainFetcher.fetchState
936
+ }
937
+ fetchState->FetchState.isReadyToEnterReorgThreshold
938
+ })
939
+
940
+ if shouldEnterReorgThreshold {
941
+ dispatchAction(EnterReorgThreshold)
942
+ }
943
+
944
+ if progressedChainsById->Utils.Dict.isEmpty {
945
+ ()
946
+ } else {
947
+ if Env.Benchmark.shouldSaveData {
948
+ let group = "Other"
949
+ Benchmark.addSummaryData(
950
+ ~group,
951
+ ~label=`Batch Size`,
952
+ ~value=totalBatchSize->Belt.Int.toFloat,
953
+ )
954
+ }
955
+
956
+ dispatchAction(StartProcessingBatch)
957
+ dispatchAction(UpdateQueues({progressedChainsById, shouldEnterReorgThreshold}))
958
+
959
+ let inMemoryStore =
960
+ rollbackInMemStore->Option.getWithDefault(
961
+ InMemoryStore.make(~entities=state.indexer.persistence.allEntities),
962
+ )
963
+
964
+ inMemoryStore->InMemoryStore.setBatchDcs(~batch, ~shouldSaveHistory)
965
+
966
+ switch await EventProcessing.processEventBatch(
967
+ ~batch,
968
+ ~inMemoryStore,
969
+ ~isInReorgThreshold,
970
+ ~loadManager=state.loadManager,
971
+ ~indexer=state.indexer,
972
+ ~chainFetchers=state.chainManager.chainFetchers,
973
+ ) {
974
+ | exception exn =>
975
+ //All casese should be handled/caught before this with better user messaging.
976
+ //This is just a safety in case something unexpected happens
977
+ let errHandler =
978
+ exn->ErrorHandling.make(~msg="A top level unexpected error occurred during processing")
979
+ dispatchAction(ErrorExit(errHandler))
980
+ | res =>
981
+ switch res {
982
+ | Ok() => dispatchAction(EventBatchProcessed({batch: batch}))
983
+ | Error(errHandler) => dispatchAction(ErrorExit(errHandler))
984
+ }
985
+ }
986
+ }
987
+ }
988
+ | Rollback =>
989
+ //If it isn't processing a batch currently continue with rollback otherwise wait for current batch to finish processing
990
+ switch state {
991
+ | {rollbackState: NoRollback | RollbackReady(_)} =>
992
+ Js.Exn.raiseError("Internal error: Rollback initiated with invalid state")
993
+ | {rollbackState: ReorgDetected({chain, blockNumber: reorgBlockNumber})} => {
994
+ let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain)
995
+
996
+ dispatchAction(StartFindingReorgDepth)
997
+ let rollbackTargetBlockNumber =
998
+ await chainFetcher->getLastKnownValidBlock(~reorgBlockNumber)
999
+
1000
+ dispatchAction(FindReorgDepth({chain, rollbackTargetBlockNumber}))
1001
+ }
1002
+ // We can come to this case when event batch finished processing
1003
+ // while we are still finding the reorg depth
1004
+ // Do nothing here, just wait for reorg depth to be found
1005
+ | {rollbackState: FindingReorgDepth} => ()
1006
+ | {rollbackState: FoundReorgDepth(_), currentlyProcessingBatch: true} =>
1007
+ Logging.info("Waiting for batch to finish processing before executing rollback")
1008
+ | {rollbackState: FoundReorgDepth({chain: reorgChain, rollbackTargetBlockNumber})} =>
1009
+ let startTime = Hrtime.makeTimer()
1010
+
1011
+ let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(reorgChain)
1012
+
1013
+ let logger = Logging.createChildFrom(
1014
+ ~logger=chainFetcher.logger,
1015
+ ~params={
1016
+ "action": "Rollback",
1017
+ "reorgChain": reorgChain,
1018
+ "targetBlockNumber": rollbackTargetBlockNumber,
1019
+ },
1020
+ )
1021
+ logger->Logging.childInfo("Started rollback on reorg")
1022
+ Prometheus.RollbackTargetBlockNumber.set(
1023
+ ~blockNumber=rollbackTargetBlockNumber,
1024
+ ~chain=reorgChain,
1025
+ )
1026
+
1027
+ let reorgChainId = reorgChain->ChainMap.Chain.toChainId
1028
+
1029
+ let rollbackTargetCheckpointId = {
1030
+ switch await state.indexer.persistence.storage.getRollbackTargetCheckpoint(
1031
+ ~reorgChainId,
1032
+ ~lastKnownValidBlockNumber=rollbackTargetBlockNumber,
1033
+ ) {
1034
+ | [checkpoint] => checkpoint["id"]
1035
+ | _ => 0.
1036
+ }
1037
+ }
1038
+
1039
+ let eventsProcessedDiffByChain = Js.Dict.empty()
1040
+ let newProgressBlockNumberPerChain = Js.Dict.empty()
1041
+ let rollbackedProcessedEvents = ref(0)
1042
+
1043
+ {
1044
+ let rollbackProgressDiff = await state.indexer.persistence.storage.getRollbackProgressDiff(
1045
+ ~rollbackTargetCheckpointId,
1046
+ )
1047
+ for idx in 0 to rollbackProgressDiff->Js.Array2.length - 1 {
1048
+ let diff = rollbackProgressDiff->Js.Array2.unsafe_get(idx)
1049
+ eventsProcessedDiffByChain->Utils.Dict.setByInt(
1050
+ diff["chain_id"],
1051
+ switch diff["events_processed_diff"]->Int.fromString {
1052
+ | Some(eventsProcessedDiff) => {
1053
+ rollbackedProcessedEvents :=
1054
+ rollbackedProcessedEvents.contents + eventsProcessedDiff
1055
+ eventsProcessedDiff
1056
+ }
1057
+ | None =>
1058
+ Js.Exn.raiseError(
1059
+ `Unexpedted case: Invalid events processed diff ${diff["events_processed_diff"]}`,
1060
+ )
1061
+ },
1062
+ )
1063
+ newProgressBlockNumberPerChain->Utils.Dict.setByInt(
1064
+ diff["chain_id"],
1065
+ if rollbackTargetCheckpointId === 0. && diff["chain_id"] === reorgChainId {
1066
+ Pervasives.min(diff["new_progress_block_number"], rollbackTargetBlockNumber)
1067
+ } else {
1068
+ diff["new_progress_block_number"]
1069
+ },
1070
+ )
1071
+ }
1072
+ }
1073
+
1074
+ let chainFetchers = state.chainManager.chainFetchers->ChainMap.mapWithKey((chain, cf) => {
1075
+ switch newProgressBlockNumberPerChain->Utils.Dict.dangerouslyGetByIntNonOption(
1076
+ chain->ChainMap.Chain.toChainId,
1077
+ ) {
1078
+ | Some(newProgressBlockNumber) =>
1079
+ let fetchState =
1080
+ cf.fetchState->FetchState.rollback(~targetBlockNumber=newProgressBlockNumber)
1081
+ let newTotalEventsProcessed =
1082
+ cf.numEventsProcessed -
1083
+ eventsProcessedDiffByChain
1084
+ ->Utils.Dict.dangerouslyGetByIntNonOption(chain->ChainMap.Chain.toChainId)
1085
+ ->Option.getUnsafe
1086
+
1087
+ if cf.committedProgressBlockNumber !== newProgressBlockNumber {
1088
+ Prometheus.ProgressBlockNumber.set(
1089
+ ~blockNumber=newProgressBlockNumber,
1090
+ ~chainId=chain->ChainMap.Chain.toChainId,
1091
+ )
1092
+ }
1093
+ if cf.numEventsProcessed !== newTotalEventsProcessed {
1094
+ Prometheus.ProgressEventsCount.set(
1095
+ ~processedCount=newTotalEventsProcessed,
1096
+ ~chainId=chain->ChainMap.Chain.toChainId,
1097
+ )
1098
+ }
1099
+
1100
+ {
1101
+ ...cf,
1102
+ reorgDetection: chain == reorgChain
1103
+ ? cf.reorgDetection->ReorgDetection.rollbackToValidBlockNumber(
1104
+ ~blockNumber=rollbackTargetBlockNumber,
1105
+ )
1106
+ : cf.reorgDetection,
1107
+ safeCheckpointTracking: switch cf.safeCheckpointTracking {
1108
+ | Some(safeCheckpointTracking) =>
1109
+ Some(
1110
+ safeCheckpointTracking->SafeCheckpointTracking.rollback(
1111
+ ~targetBlockNumber=newProgressBlockNumber,
1112
+ ),
1113
+ )
1114
+ | None => None
1115
+ },
1116
+ fetchState,
1117
+ committedProgressBlockNumber: newProgressBlockNumber,
1118
+ numEventsProcessed: newTotalEventsProcessed,
1119
+ }
1120
+
1121
+ | None => //If no change was produced on the given chain after the reorged chain, no need to rollback anything
1122
+ cf
1123
+ }
1124
+ })
1125
+
1126
+ // Construct in Memory store with rollback diff
1127
+ let diff =
1128
+ await state.indexer.persistence->Persistence.prepareRollbackDiff(
1129
+ ~rollbackTargetCheckpointId,
1130
+ ~rollbackDiffCheckpointId=state.chainManager.committedCheckpointId +. 1.,
1131
+ )
1132
+
1133
+ let chainManager = {
1134
+ ...state.chainManager,
1135
+ chainFetchers,
1136
+ }
1137
+
1138
+ logger->Logging.childTrace({
1139
+ "msg": "Finished rollback on reorg",
1140
+ "entityChanges": {
1141
+ "deleted": diff["deletedEntities"],
1142
+ "upserted": diff["setEntities"],
1143
+ },
1144
+ "rollbackedEvents": rollbackedProcessedEvents.contents,
1145
+ "beforeCheckpointId": state.chainManager.committedCheckpointId,
1146
+ "targetCheckpointId": rollbackTargetCheckpointId,
1147
+ })
1148
+ Prometheus.RollbackSuccess.increment(
1149
+ ~timeMillis=Hrtime.timeSince(startTime)->Hrtime.toMillis,
1150
+ ~rollbackedProcessedEvents=rollbackedProcessedEvents.contents,
1151
+ )
1152
+
1153
+ dispatchAction(
1154
+ SetRollbackState({
1155
+ diffInMemoryStore: diff["inMemStore"],
1156
+ rollbackedChainManager: chainManager,
1157
+ eventsProcessedDiffByChain,
1158
+ }),
1159
+ )
1160
+ }
1161
+ }
1162
+ }
1163
+
1164
+ // Production task reducer: binds the dependency-injected reducer to the real
+ // implementations (SourceManager for block waiting / query execution,
+ // ChainFetcher for reorg validation). injectedTaskReducer takes these as
+ // labeled parameters — presumably so tests can supply doubles; TODO confirm
+ // against the test suite.
+ let taskReducer = injectedTaskReducer(
1165
+ ~waitForNewBlock=SourceManager.waitForNewBlock,
1166
+ ~executeQuery=SourceManager.executeQuery,
1167
+ // Adapter: getLastKnownValidBlock expects (chainFetcher, ~reorgBlockNumber),
+ // forwarded to ChainFetcher's pipe-first API.
+ ~getLastKnownValidBlock=(chainFetcher, ~reorgBlockNumber) =>
1168
+ chainFetcher->ChainFetcher.getLastKnownValidBlock(~reorgBlockNumber),
1169
+ )