envio 3.0.0-alpha.2 → 3.0.0-alpha.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/README.md +2 -2
  2. package/evm.schema.json +44 -34
  3. package/fuel.schema.json +32 -21
  4. package/index.d.ts +4 -1
  5. package/index.js +1 -0
  6. package/package.json +7 -6
  7. package/src/Batch.res.mjs +1 -1
  8. package/src/Benchmark.res +394 -0
  9. package/src/Benchmark.res.mjs +398 -0
  10. package/src/ChainFetcher.res +459 -0
  11. package/src/ChainFetcher.res.mjs +281 -0
  12. package/src/ChainManager.res +179 -0
  13. package/src/ChainManager.res.mjs +139 -0
  14. package/src/Config.res +15 -1
  15. package/src/Config.res.mjs +28 -5
  16. package/src/Ecosystem.res +9 -124
  17. package/src/Ecosystem.res.mjs +19 -160
  18. package/src/Env.res +0 -1
  19. package/src/Env.res.mjs +0 -3
  20. package/src/Envio.gen.ts +9 -1
  21. package/src/Envio.res +12 -9
  22. package/src/EventProcessing.res +476 -0
  23. package/src/EventProcessing.res.mjs +341 -0
  24. package/src/FetchState.res +54 -29
  25. package/src/FetchState.res.mjs +62 -35
  26. package/src/GlobalState.res +1169 -0
  27. package/src/GlobalState.res.mjs +1196 -0
  28. package/src/Internal.res +43 -1
  29. package/src/LoadLayer.res +444 -0
  30. package/src/LoadLayer.res.mjs +296 -0
  31. package/src/LoadLayer.resi +32 -0
  32. package/src/Prometheus.res +8 -8
  33. package/src/Prometheus.res.mjs +10 -10
  34. package/src/ReorgDetection.res +6 -10
  35. package/src/ReorgDetection.res.mjs +6 -6
  36. package/src/Types.ts +1 -1
  37. package/src/UserContext.res +356 -0
  38. package/src/UserContext.res.mjs +238 -0
  39. package/src/Utils.res +15 -0
  40. package/src/Utils.res.mjs +18 -0
  41. package/src/bindings/ClickHouse.res +31 -1
  42. package/src/bindings/ClickHouse.res.mjs +27 -1
  43. package/src/bindings/DateFns.res +71 -0
  44. package/src/bindings/DateFns.res.mjs +22 -0
  45. package/src/bindings/Ethers.res +27 -63
  46. package/src/bindings/Ethers.res.mjs +18 -65
  47. package/src/sources/Evm.res +87 -0
  48. package/src/sources/Evm.res.mjs +105 -0
  49. package/src/sources/EvmChain.res +95 -0
  50. package/src/sources/EvmChain.res.mjs +61 -0
  51. package/src/sources/Fuel.res +19 -34
  52. package/src/sources/Fuel.res.mjs +34 -16
  53. package/src/sources/FuelSDK.res +37 -0
  54. package/src/sources/FuelSDK.res.mjs +29 -0
  55. package/src/sources/HyperFuel.res +2 -2
  56. package/src/sources/HyperFuel.resi +1 -1
  57. package/src/sources/HyperFuelClient.res +2 -2
  58. package/src/sources/HyperFuelSource.res +8 -8
  59. package/src/sources/HyperFuelSource.res.mjs +5 -5
  60. package/src/sources/HyperSyncHeightStream.res +28 -110
  61. package/src/sources/HyperSyncHeightStream.res.mjs +30 -63
  62. package/src/sources/HyperSyncSource.res +16 -18
  63. package/src/sources/HyperSyncSource.res.mjs +25 -25
  64. package/src/sources/Rpc.res +43 -0
  65. package/src/sources/Rpc.res.mjs +31 -0
  66. package/src/sources/RpcSource.res +13 -8
  67. package/src/sources/RpcSource.res.mjs +12 -7
  68. package/src/sources/Source.res +3 -2
  69. package/src/sources/SourceManager.res +183 -108
  70. package/src/sources/SourceManager.res.mjs +162 -99
  71. package/src/sources/SourceManager.resi +4 -5
  72. package/src/sources/Svm.res +59 -0
  73. package/src/sources/Svm.res.mjs +79 -0
  74. package/src/bindings/Ethers.gen.ts +0 -14
package/src/EventProcessing.res.mjs
@@ -0,0 +1,341 @@
+ // Generated by ReScript, PLEASE EDIT WITH CARE
+
+ import * as Env from "./Env.res.mjs";
+ import * as Utils from "./Utils.res.mjs";
+ import * as Config from "./Config.res.mjs";
+ import * as Hrtime from "./bindings/Hrtime.res.mjs";
+ import * as Logging from "./Logging.res.mjs";
+ import * as $$Promise from "./bindings/Promise.res.mjs";
+ import * as ChainMap from "./ChainMap.res.mjs";
+ import * as Benchmark from "./Benchmark.res.mjs";
+ import * as Ecosystem from "./Ecosystem.res.mjs";
+ import * as Belt_Array from "rescript/lib/es6/belt_Array.js";
+ import * as EventUtils from "./EventUtils.res.mjs";
+ import * as Prometheus from "./Prometheus.res.mjs";
+ import * as Persistence from "./Persistence.res.mjs";
+ import * as UserContext from "./UserContext.res.mjs";
+ import * as ChainFetcher from "./ChainFetcher.res.mjs";
+ import * as ErrorHandling from "./ErrorHandling.res.mjs";
+ import * as InMemoryTable from "./InMemoryTable.res.mjs";
+ import * as Caml_exceptions from "rescript/lib/es6/caml_exceptions.js";
+ import * as S$RescriptSchema from "rescript-schema/src/S.res.mjs";
+ import * as Caml_js_exceptions from "rescript/lib/es6/caml_js_exceptions.js";
+
+ function allChainsEventsProcessedToEndblock(chainFetchers) {
+ return Belt_Array.every(ChainMap.values(chainFetchers), (function (cf) {
+ return ChainFetcher.hasProcessedToEndblock(cf);
+ }));
+ }
+
+ function computeChainsState(chainFetchers) {
+ var chains = {};
+ Belt_Array.forEach(ChainMap.entries(chainFetchers), (function (param) {
+ var chain = param[0];
+ var chainId = String(chain);
+ var isLive = param[1].timestampCaughtUpToHeadOrEndblock !== undefined;
+ chains[chainId] = {
+ id: chain,
+ isLive: isLive
+ };
+ }));
+ return chains;
+ }
+
+ function convertFieldsToJson(fields) {
+ if (fields === undefined) {
+ return {};
+ }
+ var keys = Object.keys(fields);
+ var $$new = {};
+ for(var i = 0 ,i_finish = keys.length; i < i_finish; ++i){
+ var key = keys[i];
+ var value = fields[key];
+ $$new[key] = typeof value === "bigint" ? value.toString() : value;
+ }
+ return $$new;
+ }
+
+ function addItemToRawEvents(eventItem, inMemoryStore, config) {
+ var $$event = eventItem.event;
+ var block = $$event.block;
+ var logIndex = $$event.logIndex;
+ var blockNumber = eventItem.blockNumber;
+ var chain = eventItem.chain;
+ var eventConfig = eventItem.eventConfig;
+ var eventId = EventUtils.packEventIndex(blockNumber, logIndex);
+ var blockFields = convertFieldsToJson(block);
+ var transactionFields = convertFieldsToJson($$event.transaction);
+ config.ecosystem.cleanUpRawEventFieldsInPlace(blockFields);
+ var params = S$RescriptSchema.reverseConvertOrThrow($$event.params, eventConfig.paramsRawEventSchema);
+ var params$1 = params === null ? "null" : params;
+ var rawEvent_event_name = eventConfig.name;
+ var rawEvent_contract_name = eventConfig.contractName;
+ var rawEvent_src_address = $$event.srcAddress;
+ var rawEvent_block_hash = config.ecosystem.getId(block);
+ var rawEvent_block_timestamp = eventItem.timestamp;
+ var rawEvent = {
+ chain_id: chain,
+ event_id: eventId,
+ event_name: rawEvent_event_name,
+ contract_name: rawEvent_contract_name,
+ block_number: blockNumber,
+ log_index: logIndex,
+ src_address: rawEvent_src_address,
+ block_hash: rawEvent_block_hash,
+ block_timestamp: rawEvent_block_timestamp,
+ block_fields: blockFields,
+ transaction_fields: transactionFields,
+ params: params$1
+ };
+ var eventIdStr = eventId.toString();
+ InMemoryTable.set(inMemoryStore.rawEvents, {
+ chainId: chain,
+ eventId: eventIdStr
+ }, rawEvent);
+ }
+
+ var ProcessingError = /* @__PURE__ */Caml_exceptions.create("EventProcessing.ProcessingError");
+
+ async function runEventHandlerOrThrow(item, checkpointId, handler, inMemoryStore, loadManager, persistence, shouldSaveHistory, shouldBenchmark, chains, config) {
+ var timeBeforeHandler = Hrtime.makeTimer();
+ try {
+ var contextParams = {
+ item: item,
+ checkpointId: checkpointId,
+ inMemoryStore: inMemoryStore,
+ loadManager: loadManager,
+ persistence: persistence,
+ isPreload: false,
+ shouldSaveHistory: shouldSaveHistory,
+ chains: chains,
+ config: config,
+ isResolved: false
+ };
+ await handler({
+ event: item.event,
+ context: UserContext.getHandlerContext(contextParams)
+ });
+ contextParams.isResolved = true;
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ throw {
+ RE_EXN_ID: ProcessingError,
+ message: "Unexpected error in the event handler. Please handle the error to keep the indexer running smoothly.",
+ exn: exn,
+ item: item,
+ Error: new Error()
+ };
+ }
+ if (!shouldBenchmark) {
+ return ;
+ }
+ var timeEnd = Hrtime.floatFromMillis(Hrtime.toMillis(Hrtime.timeSince(timeBeforeHandler)));
+ return Benchmark.addSummaryData("Handlers Per Event", item.eventConfig.contractName + " " + item.eventConfig.name + " Handler (ms)", timeEnd, 4);
+ }
+
+ async function runHandlerOrThrow(item, checkpointId, inMemoryStore, loadManager, indexer, shouldSaveHistory, shouldBenchmark, chains) {
+ if (item.kind === 0) {
+ var handler = item.eventConfig.handler;
+ if (handler !== undefined) {
+ await runEventHandlerOrThrow(item, checkpointId, handler, inMemoryStore, loadManager, indexer.persistence, shouldSaveHistory, shouldBenchmark, chains, indexer.config);
+ }
+ if (indexer.config.enableRawEvents) {
+ return addItemToRawEvents(item, inMemoryStore, indexer.config);
+ } else {
+ return ;
+ }
+ }
+ try {
+ var contextParams = {
+ item: item,
+ checkpointId: checkpointId,
+ inMemoryStore: inMemoryStore,
+ loadManager: loadManager,
+ persistence: indexer.persistence,
+ isPreload: false,
+ shouldSaveHistory: shouldSaveHistory,
+ chains: chains,
+ config: indexer.config,
+ isResolved: false
+ };
+ await item.onBlockConfig.handler(Ecosystem.makeOnBlockArgs(item.blockNumber, indexer.config.ecosystem, UserContext.getHandlerContext(contextParams)));
+ contextParams.isResolved = true;
+ return ;
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ throw {
+ RE_EXN_ID: ProcessingError,
+ message: "Unexpected error in the block handler. Please handle the error to keep the indexer running smoothly.",
+ exn: exn,
+ item: item,
+ Error: new Error()
+ };
+ }
+ }
+
+ async function preloadBatchOrThrow(batch, loadManager, persistence, config, inMemoryStore, chains) {
+ var promises = [];
+ var itemIdx = 0;
+ for(var checkpointIdx = 0 ,checkpointIdx_finish = batch.checkpointIds.length; checkpointIdx < checkpointIdx_finish; ++checkpointIdx){
+ var checkpointId = batch.checkpointIds[checkpointIdx];
+ var checkpointEventsProcessed = batch.checkpointEventsProcessed[checkpointIdx];
+ for(var idx = 0; idx < checkpointEventsProcessed; ++idx){
+ var item = batch.items[itemIdx + idx | 0];
+ if (item.kind === 0) {
+ var handler = item.eventConfig.handler;
+ if (handler !== undefined) {
+ try {
+ promises.push($$Promise.silentCatch(handler({
+ event: item.event,
+ context: UserContext.getHandlerContext({
+ item: item,
+ checkpointId: checkpointId,
+ inMemoryStore: inMemoryStore,
+ loadManager: loadManager,
+ persistence: persistence,
+ isPreload: true,
+ shouldSaveHistory: false,
+ chains: chains,
+ config: config,
+ isResolved: false
+ })
+ })));
+ }
+ catch (exn){
+
+ }
+ }
+
+ } else {
+ try {
+ promises.push($$Promise.silentCatch(item.onBlockConfig.handler(Ecosystem.makeOnBlockArgs(item.blockNumber, config.ecosystem, UserContext.getHandlerContext({
+ item: item,
+ checkpointId: checkpointId,
+ inMemoryStore: inMemoryStore,
+ loadManager: loadManager,
+ persistence: persistence,
+ isPreload: true,
+ shouldSaveHistory: false,
+ chains: chains,
+ config: config,
+ isResolved: false
+ })))));
+ }
+ catch (exn$1){
+
+ }
+ }
+ }
+ itemIdx = itemIdx + checkpointEventsProcessed | 0;
+ }
+ await Promise.all(promises);
+ }
+
+ async function runBatchHandlersOrThrow(batch, inMemoryStore, loadManager, indexer, shouldSaveHistory, shouldBenchmark, chains) {
+ var itemIdx = 0;
+ for(var checkpointIdx = 0 ,checkpointIdx_finish = batch.checkpointIds.length; checkpointIdx < checkpointIdx_finish; ++checkpointIdx){
+ var checkpointId = batch.checkpointIds[checkpointIdx];
+ var checkpointEventsProcessed = batch.checkpointEventsProcessed[checkpointIdx];
+ for(var idx = 0; idx < checkpointEventsProcessed; ++idx){
+ var item = batch.items[itemIdx + idx | 0];
+ await runHandlerOrThrow(item, checkpointId, inMemoryStore, loadManager, indexer, shouldSaveHistory, shouldBenchmark, chains);
+ }
+ itemIdx = itemIdx + checkpointEventsProcessed | 0;
+ }
+ }
+
+ function registerProcessEventBatchMetrics(logger, loadDuration, handlerDuration, dbWriteDuration) {
+ Logging.childTrace(logger, {
+ msg: "Finished processing batch",
+ loader_time_elapsed: loadDuration,
+ handlers_time_elapsed: handlerDuration,
+ write_time_elapsed: dbWriteDuration
+ });
+ Prometheus.incrementLoadEntityDurationCounter(loadDuration);
+ Prometheus.incrementEventRouterDurationCounter(handlerDuration);
+ Prometheus.incrementExecuteBatchDurationCounter(dbWriteDuration);
+ Prometheus.incrementStorageWriteTimeCounter(dbWriteDuration);
+ Prometheus.incrementStorageWriteCounter();
+ }
+
+ async function processEventBatch(batch, inMemoryStore, isInReorgThreshold, loadManager, indexer, chainFetchers) {
+ var totalBatchSize = batch.totalBatchSize;
+ var chains = computeChainsState(chainFetchers);
+ var logger = Logging.getLogger();
+ Logging.childTrace(logger, {
+ msg: "Started processing batch",
+ totalBatchSize: totalBatchSize,
+ chains: Utils.Dict.mapValues(batch.progressedChainsById, (function (chainAfterBatch) {
+ return {
+ batchSize: chainAfterBatch.batchSize,
+ progress: chainAfterBatch.progressBlockNumber
+ };
+ }))
+ });
+ try {
+ var timeRef = Hrtime.makeTimer();
+ if (Utils.$$Array.notEmpty(batch.items)) {
+ await preloadBatchOrThrow(batch, loadManager, indexer.persistence, indexer.config, inMemoryStore, chains);
+ }
+ var elapsedTimeAfterLoaders = Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timeRef)));
+ if (Utils.$$Array.notEmpty(batch.items)) {
+ await runBatchHandlersOrThrow(batch, inMemoryStore, loadManager, indexer, Config.shouldSaveHistory(indexer.config, isInReorgThreshold), Env.Benchmark.shouldSaveData, chains);
+ }
+ var elapsedTimeAfterProcessing = Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timeRef)));
+ try {
+ await Persistence.writeBatch(indexer.persistence, batch, indexer.config, inMemoryStore, isInReorgThreshold);
+ var elapsedTimeAfterDbWrite = Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timeRef)));
+ var handlerDuration = elapsedTimeAfterProcessing - elapsedTimeAfterLoaders | 0;
+ var dbWriteDuration = elapsedTimeAfterDbWrite - elapsedTimeAfterProcessing | 0;
+ registerProcessEventBatchMetrics(logger, elapsedTimeAfterLoaders, handlerDuration, dbWriteDuration);
+ if (Env.Benchmark.shouldSaveData) {
+ Benchmark.addEventProcessing(totalBatchSize, elapsedTimeAfterLoaders, handlerDuration, dbWriteDuration, elapsedTimeAfterDbWrite);
+ }
+ return {
+ TAG: "Ok",
+ _0: undefined
+ };
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ if (exn.RE_EXN_ID === Persistence.StorageError) {
+ return {
+ TAG: "Error",
+ _0: ErrorHandling.make(exn.reason, logger, exn.message)
+ };
+ } else {
+ return {
+ TAG: "Error",
+ _0: ErrorHandling.make(exn, logger, "Failed writing batch to database")
+ };
+ }
+ }
+ }
+ catch (raw_exn$1){
+ var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
+ if (exn$1.RE_EXN_ID === ProcessingError) {
+ return {
+ TAG: "Error",
+ _0: ErrorHandling.make(exn$1.exn, Logging.getItemLogger(exn$1.item), exn$1.message)
+ };
+ }
+ throw exn$1;
+ }
+ }
+
+ export {
+ allChainsEventsProcessedToEndblock ,
+ computeChainsState ,
+ convertFieldsToJson ,
+ addItemToRawEvents ,
+ ProcessingError ,
+ runEventHandlerOrThrow ,
+ runHandlerOrThrow ,
+ preloadBatchOrThrow ,
+ runBatchHandlersOrThrow ,
+ registerProcessEventBatchMetrics ,
+ processEventBatch ,
+ }
+ /* Env Not a pure module */
package/src/FetchState.res
@@ -59,6 +59,7 @@ type t = {
  // ready for processing
  targetBufferSize: int,
  onBlockConfigs: array<Internal.onBlockConfig>,
+ knownHeight: int,
  }

  let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
@@ -184,16 +185,27 @@ let updateInternal = (
  ~indexingContracts=fetchState.indexingContracts,
  ~mutItems=?,
  ~blockLag=fetchState.blockLag,
+ ~knownHeight=fetchState.knownHeight,
  ): t => {
- let firstPartition = partitions->Js.Array2.unsafe_get(0)
- let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
- for idx in 0 to partitions->Array.length - 1 {
- let p = partitions->Js.Array2.unsafe_get(idx)
- if latestFullyFetchedBlock.contents.blockNumber > p.latestFetchedBlock.blockNumber {
- latestFullyFetchedBlock := p.latestFetchedBlock
+ let latestFullyFetchedBlock = if partitions->Utils.Array.notEmpty {
+ let firstPartition = partitions->Js.Array2.unsafe_get(0)
+ let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
+ for idx in 0 to partitions->Array.length - 1 {
+ let p = partitions->Js.Array2.unsafe_get(idx)
+ if latestFullyFetchedBlock.contents.blockNumber > p.latestFetchedBlock.blockNumber {
+ latestFullyFetchedBlock := p.latestFetchedBlock
+ }
+ }
+ latestFullyFetchedBlock.contents
+ } else {
+ {
+ blockNumber: knownHeight,
+ // The case is only possible when using only block handlers
+ // so it's fine to have a zero timestamp
+ // since we don't support ordered multichain mode anyways
+ blockTimestamp: 0,
  }
  }
- let latestFullyFetchedBlock = latestFullyFetchedBlock.contents

  let mutItemsRef = ref(mutItems)

@@ -282,6 +294,7 @@ let updateInternal = (
  latestFullyFetchedBlock,
  indexingContracts,
  blockLag,
+ knownHeight,
  buffer: switch mutItemsRef.contents {
  // Theoretically it could be faster to asume that
  // the items are sorted, but there are cases
@@ -751,13 +764,14 @@ let getNextQuery = (
  endBlock,
  indexingContracts,
  blockLag,
+ latestOnBlockBlockNumber,
+ knownHeight,
  }: t,
  ~concurrencyLimit,
- ~currentBlockHeight,
  ~stateId,
  ) => {
- let headBlock = currentBlockHeight - blockLag
- if headBlock <= 0 {
+ let headBlockNumber = knownHeight - blockLag
+ if headBlockNumber <= 0 {
  WaitingForNewBlock
  } else if concurrencyLimit === 0 {
  ReachedMaxConcurrency
@@ -767,11 +781,14 @@ let getNextQuery = (
  let areMergingPartitionsFetching = ref(false)
  let mostBehindMergingPartition = ref(None)
  let mergingPartitionTarget = ref(None)
+
+ let isOnBlockBehindTheHead = latestOnBlockBlockNumber < headBlockNumber
  let shouldWaitForNewBlock = ref(
  switch endBlock {
- | Some(endBlock) => headBlock < endBlock
+ | Some(endBlock) => headBlockNumber < endBlock
  | None => true
- },
+ } &&
+ !isOnBlockBehindTheHead,
  )

  let checkIsFetchingPartition = p => {
@@ -785,12 +802,12 @@ let getNextQuery = (
  let p = partitions->Js.Array2.unsafe_get(idx)

  let isFetching = checkIsFetchingPartition(p)
- let hasReachedTheHead = p.latestFetchedBlock.blockNumber >= headBlock
+ let isBehindTheHead = p.latestFetchedBlock.blockNumber < headBlockNumber

- if isFetching || !hasReachedTheHead {
+ if isFetching || isBehindTheHead {
  // Even if there are some partitions waiting for the new block
  // We still want to wait for all partitions reaching the head
- // because they might update currentBlockHeight in their response
+ // because they might update knownHeight in their response
  // Also, there are cases when some partitions fetching at 50% of the chain
  // and we don't want to poll the head for a few small partitions
  shouldWaitForNewBlock := false
@@ -845,8 +862,8 @@ let getNextQuery = (
  switch buffer->Array.get(targetBufferSize - 1) {
  | Some(item) =>
  // Just in case check that we don't query beyond the current block
- Pervasives.min(item->Internal.getItemBlockNumber, currentBlockHeight)
- | None => currentBlockHeight
+ Pervasives.min(item->Internal.getItemBlockNumber, knownHeight)
+ | None => knownHeight
  }
  }
  let queries = []
@@ -859,17 +876,17 @@ let getNextQuery = (
  | 0 => endBlock
  | _ =>
  switch endBlock {
- | Some(endBlock) => Some(Pervasives.min(headBlock, endBlock))
+ | Some(endBlock) => Some(Pervasives.min(headBlockNumber, endBlock))
  // Force head block as an endBlock when blockLag is set
  // because otherwise HyperSync might return bigger range
- | None => Some(headBlock)
+ | None => Some(headBlockNumber)
  }
  }
  // Enforce the respose range up until target block
  // Otherwise for indexers with 100+ partitions
  // we might blow up the buffer size to more than 600k events
  // simply because of HyperSync returning extra blocks
- let endBlock = switch (endBlock, maxQueryBlockNumber < currentBlockHeight) {
+ let endBlock = switch (endBlock, maxQueryBlockNumber < knownHeight) {
  | (Some(endBlock), true) => Some(Pervasives.min(maxQueryBlockNumber, endBlock))
  | (None, true) => Some(maxQueryBlockNumber)
  | (_, false) => endBlock
@@ -968,6 +985,7 @@ let make = (
  ~maxAddrInPartition,
  ~chainId,
  ~targetBufferSize,
+ ~knownHeight,
  ~progressBlockNumber=startBlock - 1,
  ~onBlockConfigs=[],
  ~blockLag=0,
@@ -1065,9 +1083,9 @@ let make = (
  }
  }

- if partitions->Array.length === 0 {
+ if partitions->Utils.Array.isEmpty && onBlockConfigs->Utils.Array.isEmpty {
  Js.Exn.raiseError(
- "Invalid configuration: Nothing to fetch. Make sure that you provided at least one contract address to index, or have events with Wildcard mode enabled.",
+ "Invalid configuration: Nothing to fetch. Make sure that you provided at least one contract address to index, or have events with Wildcard mode enabled, or have onBlock handlers.",
  )
  }

@@ -1096,6 +1114,7 @@ let make = (
  blockLag,
  onBlockConfigs,
  targetBufferSize,
+ knownHeight,
  buffer: [],
  }
  }
@@ -1173,7 +1192,7 @@ let rollback = (fetchState: t, ~targetBlockNumber) => {

  {
  ...fetchState,
- latestOnBlockBlockNumber: targetBlockNumber, // TODO: This is not tested. I assume there might be a possible issue of it skipping some blocks
+ latestOnBlockBlockNumber: targetBlockNumber, // FIXME: This is not tested. I assume there might be a possible issue of it skipping some blocks
  }->updateInternal(
  ~partitions,
  ~indexingContracts,
@@ -1204,15 +1223,12 @@ let isActivelyIndexing = ({endBlock} as fetchState: t) => {
  }
  }

- let isReadyToEnterReorgThreshold = (
- {endBlock, blockLag, buffer} as fetchState: t,
- ~currentBlockHeight,
- ) => {
+ let isReadyToEnterReorgThreshold = ({endBlock, blockLag, buffer, knownHeight} as fetchState: t) => {
  let bufferBlockNumber = fetchState->bufferBlockNumber
- currentBlockHeight !== 0 &&
+ knownHeight !== 0 &&
  switch endBlock {
  | Some(endBlock) if bufferBlockNumber >= endBlock => true
- | _ => bufferBlockNumber >= currentBlockHeight - blockLag
+ | _ => bufferBlockNumber >= knownHeight - blockLag
  } &&
  buffer->Utils.Array.isEmpty
  }
@@ -1262,3 +1278,12 @@ let getUnorderedMultichainProgressBlockNumberAt = ({buffer} as fetchState: t, ~i
  | _ => bufferBlockNumber
  }
  }
+
+ let updateKnownHeight = (fetchState: t, ~knownHeight) => {
+ if knownHeight > fetchState.knownHeight {
+ Prometheus.setKnownHeight(~blockNumber=knownHeight, ~chainId=fetchState.chainId)
+ fetchState->updateInternal(~knownHeight)
+ } else {
+ fetchState
+ }
+ }