envio 2.27.1 → 2.27.3

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.27.1",
+ "version": "v2.27.3",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,10 +25,10 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.27.1",
- "envio-linux-arm64": "v2.27.1",
- "envio-darwin-x64": "v2.27.1",
- "envio-darwin-arm64": "v2.27.1"
+ "envio-linux-x64": "v2.27.3",
+ "envio-linux-arm64": "v2.27.3",
+ "envio-darwin-x64": "v2.27.3",
+ "envio-darwin-arm64": "v2.27.3"
  },
  "dependencies": {
  "@envio-dev/hypersync-client": "0.6.5",
@@ -68,8 +68,8 @@ type t = {
  // Fields computed by updateInternal
  latestFullyFetchedBlock: blockNumberAndTimestamp,
  // How much blocks behind the head we should query
- // Added for the purpose of avoiding reorg handling
- blockLag: option<int>,
+ // Needed to query before entering reorg threshold
+ blockLag: int,
  //Items ordered from latest to earliest
  queue: array<Internal.eventItem>,
  }
@@ -229,6 +229,7 @@ let updateInternal = (
  ~dcsToStore=fetchState.dcsToStore,
  ~currentBlockHeight=?,
  ~queue=fetchState.queue,
+ ~blockLag=fetchState.blockLag,
  ): t => {
  let firstPartition = partitions->Js.Array2.unsafe_get(0)
  let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
@@ -257,12 +258,12 @@ let updateInternal = (
  }
  }

- let queueSize = queue->Array.length
+ let bufferSize = queue->Array.length
  Prometheus.IndexingPartitions.set(
  ~partitionsCount=partitions->Array.length,
  ~chainId=fetchState.chainId,
  )
- Prometheus.IndexingBufferSize.set(~bufferSize=queueSize, ~chainId=fetchState.chainId)
+ Prometheus.IndexingBufferSize.set(~bufferSize, ~chainId=fetchState.chainId)
  Prometheus.IndexingBufferBlockNumber.set(
  ~blockNumber=latestFullyFetchedBlock.blockNumber,
  ~chainId=fetchState.chainId,
@@ -284,7 +285,7 @@ let updateInternal = (
  latestFullyFetchedBlock,
  indexingContracts,
  dcsToStore,
- blockLag: fetchState.blockLag,
+ blockLag,
  queue,
  }
  }
@@ -717,13 +718,12 @@ let getNextQuery = (
  ~currentBlockHeight,
  ~stateId,
  ) => {
- if currentBlockHeight === 0 {
+ let headBlock = currentBlockHeight - blockLag
+ if headBlock <= 0 {
  WaitingForNewBlock
  } else if concurrencyLimit === 0 {
  ReachedMaxConcurrency
  } else {
- let headBlock = currentBlockHeight - blockLag->Option.getWithDefault(0)
-
  let fullPartitions = []
  let mergingPartitions = []
  let areMergingPartitionsFetching = ref(false)
@@ -823,14 +823,14 @@
  switch p->makePartitionQuery(
  ~indexingContracts,
  ~endBlock=switch blockLag {
- | Some(_) =>
+ | 0 => endBlock
+ | _ =>
  switch endBlock {
  | Some(endBlock) => Some(Pervasives.min(headBlock, endBlock))
  // Force head block as an endBlock when blockLag is set
  // because otherwise HyperSync might return bigger range
  | None => Some(headBlock)
  }
- | None => endBlock
  },
  ~mergeTarget,
  ) {
@@ -893,18 +893,6 @@ let queueItemBlockNumber = (queueItem: queueItem) => {
  }
  }

- let queueItemIsInReorgThreshold = (
- queueItem: queueItem,
- ~currentBlockHeight,
- ~highestBlockBelowThreshold,
- ) => {
- if currentBlockHeight === 0 {
- false
- } else {
- queueItem->queueItemBlockNumber > highestBlockBelowThreshold
- }
- }
-
  /**
  Simple constructor for no item from partition
  */
@@ -969,7 +957,7 @@ let make = (
  ~contracts: array<indexingContract>,
  ~maxAddrInPartition,
  ~chainId,
- ~blockLag=?,
+ ~blockLag=0,
  ): t => {
  let latestFetchedBlock = {
  blockTimestamp: 0,
@@ -1098,7 +1086,7 @@ let make = (
  }
  }

- let queueSize = ({queue}: t) => queue->Array.length
+ let bufferSize = ({queue}: t) => queue->Array.length

  /**
  * Returns the latest block number fetched for the lowest fetcher queue (ie the earliest un-fetched dynamic contract)
@@ -1219,10 +1207,42 @@ let isActivelyIndexing = ({latestFullyFetchedBlock, endBlock} as fetchState: t)
  | Some(endBlock) =>
  let isPastEndblock = latestFullyFetchedBlock.blockNumber >= endBlock
  if isPastEndblock {
- fetchState->queueSize > 0
+ fetchState->bufferSize > 0
  } else {
  true
  }
  | None => true
  }
  }
+
+ let isReadyToEnterReorgThreshold = (
+ {latestFullyFetchedBlock, endBlock, blockLag, queue}: t,
+ ~currentBlockHeight,
+ ) => {
+ currentBlockHeight !== 0 &&
+ switch endBlock {
+ | Some(endBlock) if latestFullyFetchedBlock.blockNumber >= endBlock => true
+ | _ => latestFullyFetchedBlock.blockNumber >= currentBlockHeight - blockLag
+ } &&
+ queue->Utils.Array.isEmpty
+ }
+
+ let filterAndSortForUnorderedBatch = {
+ let hasBatchItem = ({queue, latestFullyFetchedBlock}: t) => {
+ switch queue->Utils.Array.last {
+ | Some(item) => item.blockNumber <= latestFullyFetchedBlock.blockNumber
+ | None => false
+ }
+ }
+
+ let compareUnorderedBatchChainPriority = (a: t, b: t) => {
+ // Use unsafe since we filtered out all queues without batch items
+ (a.queue->Utils.Array.lastUnsafe).timestamp - (b.queue->Utils.Array.lastUnsafe).timestamp
+ }
+
+ (fetchStates: array<t>) => {
+ fetchStates
+ ->Array.keepU(hasBatchItem)
+ ->Js.Array2.sortInPlaceWith(compareUnorderedBatchChainPriority)
+ }
+ }
@@ -127,12 +127,13 @@ function checkIsWithinSyncRange(latestFetchedBlock, currentBlockHeight) {
  return (currentBlockHeight - latestFetchedBlock.blockNumber) / currentBlockHeight <= 0.001;
  }

- function updateInternal(fetchState, partitionsOpt, nextPartitionIndexOpt, indexingContractsOpt, dcsToStoreOpt, currentBlockHeight, queueOpt) {
+ function updateInternal(fetchState, partitionsOpt, nextPartitionIndexOpt, indexingContractsOpt, dcsToStoreOpt, currentBlockHeight, queueOpt, blockLagOpt) {
  var partitions = partitionsOpt !== undefined ? partitionsOpt : fetchState.partitions;
  var nextPartitionIndex = nextPartitionIndexOpt !== undefined ? nextPartitionIndexOpt : fetchState.nextPartitionIndex;
  var indexingContracts = indexingContractsOpt !== undefined ? indexingContractsOpt : fetchState.indexingContracts;
  var dcsToStore = dcsToStoreOpt !== undefined ? Caml_option.valFromOption(dcsToStoreOpt) : fetchState.dcsToStore;
  var queue = queueOpt !== undefined ? queueOpt : fetchState.queue;
+ var blockLag = blockLagOpt !== undefined ? blockLagOpt : fetchState.blockLag;
  var firstPartition = partitions[0];
  var latestFullyFetchedBlock = firstPartition.latestFetchedBlock;
  for(var idx = 0 ,idx_finish = partitions.length; idx < idx_finish; ++idx){
@@ -146,9 +147,9 @@ function updateInternal(fetchState, partitionsOpt, nextPartitionIndexOpt, indexi
  var isFetchingAtHead = currentBlockHeight !== undefined ? (
  latestFullyFetchedBlock$1.blockNumber >= currentBlockHeight ? true : fetchState.isFetchingAtHead && checkIsWithinSyncRange(latestFullyFetchedBlock$1, currentBlockHeight)
  ) : fetchState.isFetchingAtHead;
- var queueSize = queue.length;
+ var bufferSize = queue.length;
  Prometheus.IndexingPartitions.set(partitions.length, fetchState.chainId);
- Prometheus.IndexingBufferSize.set(queueSize, fetchState.chainId);
+ Prometheus.IndexingBufferSize.set(bufferSize, fetchState.chainId);
  Prometheus.IndexingBufferBlockNumber.set(latestFullyFetchedBlock$1.blockNumber, fetchState.chainId);
  var item = Utils.$$Array.last(queue);
  return {
@@ -164,7 +165,7 @@ function updateInternal(fetchState, partitionsOpt, nextPartitionIndexOpt, indexi
  dcsToStore: dcsToStore,
  chainId: fetchState.chainId,
  latestFullyFetchedBlock: latestFullyFetchedBlock$1,
- blockLag: fetchState.blockLag,
+ blockLag: blockLag,
  queue: queue
  };
  }
@@ -336,7 +337,7 @@ function registerDynamicContracts(fetchState, dynamicContracts, currentBlockHeig
  }
  Prometheus.IndexingAddresses.set(Object.keys(fetchState.indexingContracts).length + dcsToStore.length | 0, fetchState.chainId);
  var existingDcs = fetchState.dcsToStore;
- return updateInternal(fetchState, fetchState.partitions.concat(newPartitions), fetchState.nextPartitionIndex + newPartitions.length | 0, Object.assign(registeringContracts, indexingContracts), Caml_option.some(existingDcs !== undefined ? Belt_Array.concat(existingDcs, dcsToStore) : dcsToStore), currentBlockHeight, undefined);
+ return updateInternal(fetchState, fetchState.partitions.concat(newPartitions), fetchState.nextPartitionIndex + newPartitions.length | 0, Object.assign(registeringContracts, indexingContracts), Caml_option.some(existingDcs !== undefined ? Belt_Array.concat(existingDcs, dcsToStore) : dcsToStore), currentBlockHeight, undefined, undefined);
  }

  var UnexpectedPartitionNotFound = /* @__PURE__ */Caml_exceptions.create("FetchState.UnexpectedPartitionNotFound");
@@ -413,7 +414,7 @@ function handleQueryResult(fetchState, query, latestFetchedBlock, reversedNewIte
  };
  }
  return Belt_Result.map(tmp, (function (partitions) {
- return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight, mergeSortedEventList(reversedNewItems, fetchState.queue));
+ return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight, mergeSortedEventList(reversedNewItems, fetchState.queue), undefined);
  }));
  }
@@ -501,19 +502,19 @@ function isFullPartition(p, maxAddrInPartition) {
  }

  function getNextQuery(param, concurrencyLimit, targetBufferSize, currentBlockHeight, stateId) {
- if (currentBlockHeight === 0) {
- return "WaitingForNewBlock";
- }
- if (concurrencyLimit === 0) {
- return "ReachedMaxConcurrency";
- }
  var queue = param.queue;
  var blockLag = param.blockLag;
  var indexingContracts = param.indexingContracts;
  var maxAddrInPartition = param.maxAddrInPartition;
  var endBlock = param.endBlock;
  var partitions = param.partitions;
- var headBlock = currentBlockHeight - Belt_Option.getWithDefault(blockLag, 0) | 0;
+ var headBlock = currentBlockHeight - blockLag | 0;
+ if (headBlock <= 0) {
+ return "WaitingForNewBlock";
+ }
+ if (concurrencyLimit === 0) {
+ return "ReachedMaxConcurrency";
+ }
  var fullPartitions = [];
  var mergingPartitions = [];
  var areMergingPartitionsFetching = false;
@@ -575,7 +576,7 @@ function getNextQuery(param, concurrencyLimit, targetBufferSize, currentBlockHei
  if (!(!checkIsFetchingPartition(p) && p.latestFetchedBlock.blockNumber < maxQueryBlockNumber)) {
  return ;
  }
- var q = makePartitionQuery(p, indexingContracts, blockLag !== undefined ? (
+ var q = makePartitionQuery(p, indexingContracts, blockLag !== 0 ? (
  endBlock !== undefined ? (
  headBlock < endBlock ? headBlock : endBlock
  ) : headBlock
@@ -639,14 +640,6 @@ function queueItemBlockNumber(queueItem) {
  }
  }

- function queueItemIsInReorgThreshold(queueItem, currentBlockHeight, highestBlockBelowThreshold) {
- if (currentBlockHeight === 0) {
- return false;
- } else {
- return queueItemBlockNumber(queueItem) > highestBlockBelowThreshold;
- }
- }
-
  function makeNoItem(param) {
  return {
  TAG: "NoItem",
@@ -698,7 +691,8 @@ function getEarliestEvent(param) {
  }
  }

- function make(startBlock, endBlock, eventConfigs, contracts, maxAddrInPartition, chainId, blockLag) {
+ function make(startBlock, endBlock, eventConfigs, contracts, maxAddrInPartition, chainId, blockLagOpt) {
+ var blockLag = blockLagOpt !== undefined ? blockLagOpt : 0;
  var latestFetchedBlock_blockNumber = startBlock - 1 | 0;
  var latestFetchedBlock = {
  blockNumber: latestFetchedBlock_blockNumber,
@@ -810,7 +804,7 @@ function make(startBlock, endBlock, eventConfigs, contracts, maxAddrInPartition,
  };
  }

- function queueSize(param) {
+ function bufferSize(param) {
  return param.queue.length;
  }
@@ -898,7 +892,7 @@ function rollback(fetchState, firstChangeEvent) {
  } else {
  tmp = undefined;
  }
- return updateInternal(fetchState, partitions, undefined, indexingContracts, Caml_option.some(tmp), undefined, pruneQueueFromFirstChangeEvent(fetchState.queue, firstChangeEvent));
+ return updateInternal(fetchState, partitions, undefined, indexingContracts, Caml_option.some(tmp), undefined, pruneQueueFromFirstChangeEvent(fetchState.queue, firstChangeEvent), undefined);
  }

  function isActivelyIndexing(fetchState) {
@@ -908,12 +902,42 @@ function isActivelyIndexing(fetchState) {
  }
  var isPastEndblock = fetchState.latestFullyFetchedBlock.blockNumber >= endBlock;
  if (isPastEndblock) {
- return queueSize(fetchState) > 0;
+ return bufferSize(fetchState) > 0;
  } else {
  return true;
  }
  }

+ function isReadyToEnterReorgThreshold(param, currentBlockHeight) {
+ var blockLag = param.blockLag;
+ var latestFullyFetchedBlock = param.latestFullyFetchedBlock;
+ var endBlock = param.endBlock;
+ if (currentBlockHeight !== 0 && (
+ endBlock !== undefined && latestFullyFetchedBlock.blockNumber >= endBlock ? true : latestFullyFetchedBlock.blockNumber >= (currentBlockHeight - blockLag | 0)
+ )) {
+ return Utils.$$Array.isEmpty(param.queue);
+ } else {
+ return false;
+ }
+ }
+
+ function hasBatchItem(param) {
+ var item = Utils.$$Array.last(param.queue);
+ if (item !== undefined) {
+ return item.blockNumber <= param.latestFullyFetchedBlock.blockNumber;
+ } else {
+ return false;
+ }
+ }
+
+ function compareUnorderedBatchChainPriority(a, b) {
+ return Utils.$$Array.lastUnsafe(a.queue).timestamp - Utils.$$Array.lastUnsafe(b.queue).timestamp | 0;
+ }
+
+ function filterAndSortForUnorderedBatch(fetchStates) {
+ return Belt_Array.keepU(fetchStates, hasBatchItem).sort(compareUnorderedBatchChainPriority);
+ }
+
  exports.copy = copy;
  exports.eventItemGt = eventItemGt;
  exports.mergeSortedEventList = mergeSortedEventList;
@@ -933,15 +957,16 @@ exports.addressesByContractNameGetAll = addressesByContractNameGetAll;
  exports.isFullPartition = isFullPartition;
  exports.getNextQuery = getNextQuery;
  exports.queueItemBlockNumber = queueItemBlockNumber;
- exports.queueItemIsInReorgThreshold = queueItemIsInReorgThreshold;
  exports.makeNoItem = makeNoItem;
  exports.qItemLt = qItemLt;
  exports.getEarliestEvent = getEarliestEvent;
  exports.make = make;
- exports.queueSize = queueSize;
+ exports.bufferSize = bufferSize;
  exports.getLatestFullyFetchedBlock = getLatestFullyFetchedBlock;
  exports.pruneQueueFromFirstChangeEvent = pruneQueueFromFirstChangeEvent;
  exports.rollbackPartition = rollbackPartition;
  exports.rollback = rollback;
  exports.isActivelyIndexing = isActivelyIndexing;
+ exports.isReadyToEnterReorgThreshold = isReadyToEnterReorgThreshold;
+ exports.filterAndSortForUnorderedBatch = filterAndSortForUnorderedBatch;
  /* Utils Not a pure module */
@@ -472,6 +472,17 @@ module ReorgDetectionBlockNumber = {
  }
  }

+ module ReorgThreshold = {
+ let gauge = PromClient.Gauge.makeGauge({
+ "name": "envio_reorg_threshold",
+ "help": "Whether indexing is currently within the reorg threshold",
+ })
+
+ let set = (~isInReorgThreshold) => {
+ gauge->PromClient.Gauge.set(isInReorgThreshold ? 1 : 0)
+ }
+ }
+
  module RollbackEnabled = {
  let gauge = PromClient.Gauge.makeGauge({
  "name": "envio_rollback_enabled",
@@ -573,17 +573,31 @@ var ReorgDetectionBlockNumber = {
  };

  var gauge$14 = new PromClient.Gauge({
+ name: "envio_reorg_threshold",
+ help: "Whether indexing is currently within the reorg threshold"
+ });
+
+ function set$14(isInReorgThreshold) {
+ gauge$14.set(isInReorgThreshold ? 1 : 0);
+ }
+
+ var ReorgThreshold = {
+ gauge: gauge$14,
+ set: set$14
+ };
+
+ var gauge$15 = new PromClient.Gauge({
  name: "envio_rollback_enabled",
  help: "Whether rollback on reorg is enabled"
  });

- function set$14(enabled) {
- gauge$14.set(enabled ? 1 : 0);
+ function set$15(enabled) {
+ gauge$15.set(enabled ? 1 : 0);
  }

  var RollbackEnabled = {
- gauge: gauge$14,
- set: set$14
+ gauge: gauge$15,
+ set: set$15
  };

  var histogram = new PromClient.Histogram({
@@ -606,62 +620,62 @@ var RollbackDuration = {
  startTimer: startTimer$1
  };

- var gauge$15 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);
+ var gauge$16 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);

- function set$15(blockNumber, chain) {
- handleInt$1(gauge$15, chain, blockNumber);
+ function set$16(blockNumber, chain) {
+ handleInt$1(gauge$16, chain, blockNumber);
  }

  var RollbackTargetBlockNumber = {
- gauge: gauge$15,
- set: set$15
+ gauge: gauge$16,
+ set: set$16
  };

- var gauge$16 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);
+ var gauge$17 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);

- function set$16(blockNumber, chainId) {
- handleInt$1(gauge$16, chainId, blockNumber);
+ function set$17(blockNumber, chainId) {
+ handleInt$1(gauge$17, chainId, blockNumber);
  }

  var ProcessingBlockNumber = {
- gauge: gauge$16,
- set: set$16
+ gauge: gauge$17,
+ set: set$17
  };

- var gauge$17 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);
+ var gauge$18 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);

- function set$17(batchSize, chainId) {
- handleInt$1(gauge$17, chainId, batchSize);
+ function set$18(batchSize, chainId) {
+ handleInt$1(gauge$18, chainId, batchSize);
  }

  var ProcessingBatchSize = {
- gauge: gauge$17,
- set: set$17
+ gauge: gauge$18,
+ set: set$18
  };

- var gauge$18 = new PromClient.Gauge({
+ var gauge$19 = new PromClient.Gauge({
  name: "envio_processing_max_batch_size",
  help: "The maximum number of items to process in a single batch."
  });

- function set$18(maxBatchSize) {
- gauge$18.set(maxBatchSize);
+ function set$19(maxBatchSize) {
+ gauge$19.set(maxBatchSize);
  }

  var ProcessingMaxBatchSize = {
- gauge: gauge$18,
- set: set$18
+ gauge: gauge$19,
+ set: set$19
  };

- var gauge$19 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);
+ var gauge$20 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);

- function set$19(blockNumber, chainId) {
- handleInt$1(gauge$19, chainId, blockNumber);
+ function set$20(blockNumber, chainId) {
+ handleInt$1(gauge$20, chainId, blockNumber);
  }

  var ProgressBlockNumber = {
- gauge: gauge$19,
- set: set$19
+ gauge: gauge$20,
+ set: set$20
  };

  var deprecatedGauge$1 = new PromClient.Gauge({
@@ -670,45 +684,45 @@ var deprecatedGauge$1 = new PromClient.Gauge({
  labelNames: ["chainId"]
  });

- var gauge$20 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);
+ var gauge$21 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);

- function set$20(processedCount, chainId) {
+ function set$21(processedCount, chainId) {
  deprecatedGauge$1.labels({
  chainId: chainId
  }).set(processedCount);
- handleInt$1(gauge$20, chainId, processedCount);
+ handleInt$1(gauge$21, chainId, processedCount);
  }

  var ProgressEventsCount = {
  deprecatedGauge: deprecatedGauge$1,
- gauge: gauge$20,
- set: set$20
+ gauge: gauge$21,
+ set: set$21
  };

  var effectLabelsSchema = S$RescriptSchema.object(function (s) {
  return s.f("effect", S$RescriptSchema.string);
  });

- var gauge$21 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);
+ var gauge$22 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);

- function set$21(callsCount, effectName) {
- handleInt$1(gauge$21, effectName, callsCount);
+ function set$22(callsCount, effectName) {
+ handleInt$1(gauge$22, effectName, callsCount);
  }

  var EffectCallsCount = {
- gauge: gauge$21,
- set: set$21
+ gauge: gauge$22,
+ set: set$22
  };

- var gauge$22 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);
+ var gauge$23 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);

- function set$22(count, effectName) {
- handleInt$1(gauge$22, effectName, count);
+ function set$23(count, effectName) {
+ handleInt$1(gauge$23, effectName, count);
  }

  var EffectCacheCount = {
- gauge: gauge$22,
- set: set$22
+ gauge: gauge$23,
+ set: set$23
  };

  exports.loadEntitiesDurationCounter = loadEntitiesDurationCounter;
@@ -748,6 +762,7 @@ exports.SourceHeight = SourceHeight;
  exports.SourceGetHeightDuration = SourceGetHeightDuration;
  exports.ReorgCount = ReorgCount;
  exports.ReorgDetectionBlockNumber = ReorgDetectionBlockNumber;
+ exports.ReorgThreshold = ReorgThreshold;
  exports.RollbackEnabled = RollbackEnabled;
  exports.RollbackDuration = RollbackDuration;
  exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
package/src/Utils.res CHANGED
@@ -250,6 +250,8 @@ Helper to check if a value exists in an array

  let last = (arr: array<'a>): option<'a> => arr->Belt.Array.get(arr->Array.length - 1)

+ let lastUnsafe = (arr: array<'a>): 'a => arr->Belt.Array.getUnsafe(arr->Array.length - 1)
+
  let findReverseWithIndex = (arr: array<'a>, fn: 'a => bool): option<('a, int)> => {
  let rec loop = (index: int) => {
  if index < 0 {
package/src/Utils.res.js CHANGED
@@ -268,6 +268,10 @@ function last(arr) {
  return Belt_Array.get(arr, arr.length - 1 | 0);
  }

+ function lastUnsafe(arr) {
+ return arr[arr.length - 1 | 0];
+ }
+
  function findReverseWithIndex(arr, fn) {
  var _index = arr.length - 1 | 0;
  while(true) {
@@ -310,6 +314,7 @@ var $$Array$1 = {
  awaitEach: awaitEach,
  removeAtIndex: removeAtIndex,
  last: last,
+ lastUnsafe: lastUnsafe,
  findReverseWithIndex: findReverseWithIndex,
  interleave: interleave
  };