envio 2.27.2 → 2.27.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,7 +12,7 @@ HyperIndex is a fast, developer-friendly multichain indexer, optimized for both
  ## Key Features

  - **[Indexer auto-generation](https://docs.envio.dev/docs/HyperIndex/contract-import)** – Generate Indexers directly from smart contract addresses
- - **High performance** – Historical backfills at over 5,000+ events per second ([fastest in market](https://docs.envio.dev/blog/indexer-benchmarking-results))
+ - **High performance** – Historical backfills at over 10,000+ events per second ([fastest in market](https://docs.envio.dev/blog/indexer-benchmarking-results))
  - **Local development** – Full-featured local environment with Docker
  - **[Multichain indexing](https://docs.envio.dev/docs/HyperIndex/multichain-indexing)** – Index any EVM-compatible blockchain and Fuel (simultaneously)
  - **Real-time indexing** – Instantly track blockchain events
package/index.js CHANGED
@@ -35,4 +35,5 @@ exports.S = {
  // Nullish type will change in "sury@10"
  // nullish: Sury.nullish,
  assertOrThrow: Sury.assertOrThrow,
+ parseOrThrow: Sury.parseOrThrow,
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.27.2",
+ "version": "v2.27.4",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,10 +25,10 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.27.2",
- "envio-linux-arm64": "v2.27.2",
- "envio-darwin-x64": "v2.27.2",
- "envio-darwin-arm64": "v2.27.2"
+ "envio-linux-x64": "v2.27.4",
+ "envio-linux-arm64": "v2.27.4",
+ "envio-darwin-x64": "v2.27.4",
+ "envio-darwin-arm64": "v2.27.4"
  },
  "dependencies": {
  "@envio-dev/hypersync-client": "0.6.5",
@@ -258,12 +258,12 @@ let updateInternal = (
  }
  }

- let queueSize = queue->Array.length
+ let bufferSize = queue->Array.length
  Prometheus.IndexingPartitions.set(
  ~partitionsCount=partitions->Array.length,
  ~chainId=fetchState.chainId,
  )
- Prometheus.IndexingBufferSize.set(~bufferSize=queueSize, ~chainId=fetchState.chainId)
+ Prometheus.IndexingBufferSize.set(~bufferSize, ~chainId=fetchState.chainId)
  Prometheus.IndexingBufferBlockNumber.set(
  ~blockNumber=latestFullyFetchedBlock.blockNumber,
  ~chainId=fetchState.chainId,
@@ -1086,7 +1086,7 @@ let make = (
  }
  }

- let queueSize = ({queue}: t) => queue->Array.length
+ let bufferSize = ({queue}: t) => queue->Array.length

  /**
  * Returns the latest block number fetched for the lowest fetcher queue (ie the earliest un-fetched dynamic contract)
@@ -1207,7 +1207,7 @@ let isActivelyIndexing = ({latestFullyFetchedBlock, endBlock} as fetchState: t)
  | Some(endBlock) =>
  let isPastEndblock = latestFullyFetchedBlock.blockNumber >= endBlock
  if isPastEndblock {
- fetchState->queueSize > 0
+ fetchState->bufferSize > 0
  } else {
  true
  }
@@ -1226,3 +1226,43 @@ let isReadyToEnterReorgThreshold = (
  } &&
  queue->Utils.Array.isEmpty
  }
+
+ let filterAndSortForUnorderedBatch = {
+   let hasBatchItem = ({queue, latestFullyFetchedBlock}: t) => {
+     switch queue->Utils.Array.last {
+     | Some(item) => item.blockNumber <= latestFullyFetchedBlock.blockNumber
+     | None => false
+     }
+   }
+
+   let hasFullBatch = ({queue, latestFullyFetchedBlock}: t, ~maxBatchSize) => {
+     // Queue is ordered from latest to earliest, so the earliest eligible
+     // item for a full batch of size B is at index (length - B).
+     // Do NOT subtract an extra 1 here; when length === B we should still
+     // classify the queue as full and probe index 0.
+     let targetBlockIdx = queue->Array.length - maxBatchSize
+     if targetBlockIdx < 0 {
+       false
+     } else {
+       // Unsafe can fail when maxBatchSize is 0,
+       // but we ignore the case
+       (queue->Js.Array2.unsafe_get(targetBlockIdx)).blockNumber <=
+         latestFullyFetchedBlock.blockNumber
+     }
+   }
+
+   (fetchStates: array<t>, ~maxBatchSize: int) => {
+     fetchStates
+     ->Array.keepU(hasBatchItem)
+     ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
+       switch (a->hasFullBatch(~maxBatchSize), b->hasFullBatch(~maxBatchSize)) {
+       | (true, true)
+       | (false, false) =>
+         // Use unsafe since we filtered out all queues without batch items
+         (a.queue->Utils.Array.lastUnsafe).timestamp - (b.queue->Utils.Array.lastUnsafe).timestamp
+       | (true, false) => -1
+       | (false, true) => 1
+       }
+     })
+   }
+ }
@@ -147,9 +147,9 @@ function updateInternal(fetchState, partitionsOpt, nextPartitionIndexOpt, indexi
  var isFetchingAtHead = currentBlockHeight !== undefined ? (
  latestFullyFetchedBlock$1.blockNumber >= currentBlockHeight ? true : fetchState.isFetchingAtHead && checkIsWithinSyncRange(latestFullyFetchedBlock$1, currentBlockHeight)
  ) : fetchState.isFetchingAtHead;
- var queueSize = queue.length;
+ var bufferSize = queue.length;
  Prometheus.IndexingPartitions.set(partitions.length, fetchState.chainId);
- Prometheus.IndexingBufferSize.set(queueSize, fetchState.chainId);
+ Prometheus.IndexingBufferSize.set(bufferSize, fetchState.chainId);
  Prometheus.IndexingBufferBlockNumber.set(latestFullyFetchedBlock$1.blockNumber, fetchState.chainId);
  var item = Utils.$$Array.last(queue);
  return {
@@ -804,7 +804,7 @@ function make(startBlock, endBlock, eventConfigs, contracts, maxAddrInPartition,
  };
  }

- function queueSize(param) {
+ function bufferSize(param) {
  return param.queue.length;
  }

@@ -902,7 +902,7 @@ function isActivelyIndexing(fetchState) {
  }
  var isPastEndblock = fetchState.latestFullyFetchedBlock.blockNumber >= endBlock;
  if (isPastEndblock) {
- return queueSize(fetchState) > 0;
+ return bufferSize(fetchState) > 0;
  } else {
  return true;
  }
@@ -921,6 +921,41 @@ function isReadyToEnterReorgThreshold(param, currentBlockHeight) {
  }
  }

+ function hasBatchItem(param) {
+   var item = Utils.$$Array.last(param.queue);
+   if (item !== undefined) {
+     return item.blockNumber <= param.latestFullyFetchedBlock.blockNumber;
+   } else {
+     return false;
+   }
+ }
+
+ function hasFullBatch(param, maxBatchSize) {
+   var queue = param.queue;
+   var targetBlockIdx = queue.length - maxBatchSize | 0;
+   if (targetBlockIdx < 0) {
+     return false;
+   } else {
+     return queue[targetBlockIdx].blockNumber <= param.latestFullyFetchedBlock.blockNumber;
+   }
+ }
+
+ function filterAndSortForUnorderedBatch(fetchStates, maxBatchSize) {
+   return Belt_Array.keepU(fetchStates, hasBatchItem).sort(function (a, b) {
+     var match = hasFullBatch(a, maxBatchSize);
+     var match$1 = hasFullBatch(b, maxBatchSize);
+     if (match) {
+       if (!match$1) {
+         return -1;
+       }
+
+     } else if (match$1) {
+       return 1;
+     }
+     return Utils.$$Array.lastUnsafe(a.queue).timestamp - Utils.$$Array.lastUnsafe(b.queue).timestamp | 0;
+   });
+ }
+
  exports.copy = copy;
  exports.eventItemGt = eventItemGt;
  exports.mergeSortedEventList = mergeSortedEventList;
@@ -944,11 +979,12 @@ exports.makeNoItem = makeNoItem;
  exports.qItemLt = qItemLt;
  exports.getEarliestEvent = getEarliestEvent;
  exports.make = make;
- exports.queueSize = queueSize;
+ exports.bufferSize = bufferSize;
  exports.getLatestFullyFetchedBlock = getLatestFullyFetchedBlock;
  exports.pruneQueueFromFirstChangeEvent = pruneQueueFromFirstChangeEvent;
  exports.rollbackPartition = rollbackPartition;
  exports.rollback = rollback;
  exports.isActivelyIndexing = isActivelyIndexing;
  exports.isReadyToEnterReorgThreshold = isReadyToEnterReorgThreshold;
+ exports.filterAndSortForUnorderedBatch = filterAndSortForUnorderedBatch;
  /* Utils Not a pure module */
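The `filterAndSortForUnorderedBatch` helper added above only changes batch selection order: chains whose queue already holds a batch item come first, chains with a full batch of `maxBatchSize` items come before partially filled ones, and ties are broken by the earliest (last) queued item's timestamp. A minimal JavaScript sketch of that ordering, assuming the simplified fetch-state shape `{ queue, latestFullyFetchedBlock }` with queues sorted latest-first; the `last` helper and shapes are illustrative, not the package's API:

```js
// Sketch only: plain objects stand in for the real fetch-state type.
const last = (arr) => arr[arr.length - 1];

const hasBatchItem = ({ queue, latestFullyFetchedBlock }) => {
  const item = last(queue);
  return item !== undefined && item.blockNumber <= latestFullyFetchedBlock.blockNumber;
};

const hasFullBatch = ({ queue, latestFullyFetchedBlock }, maxBatchSize) => {
  // Earliest item of a full batch of size B sits at index (length - B).
  const targetBlockIdx = queue.length - maxBatchSize;
  return targetBlockIdx >= 0 &&
    queue[targetBlockIdx].blockNumber <= latestFullyFetchedBlock.blockNumber;
};

const filterAndSortForUnorderedBatch = (fetchStates, maxBatchSize) =>
  fetchStates
    .filter(hasBatchItem) // keep only chains that can contribute an item now
    .sort((a, b) => {
      const aFull = hasFullBatch(a, maxBatchSize);
      const bFull = hasFullBatch(b, maxBatchSize);
      if (aFull !== bFull) return aFull ? -1 : 1; // full batches first
      // Tie-break by the earliest queued item's timestamp.
      return last(a.queue).timestamp - last(b.queue).timestamp;
    });
```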
package/src/PgStorage.res CHANGED
@@ -325,8 +325,8 @@ let removeInvalidUtf8InPlace = entities =>
  })
  })

- let pgEncodingErrorSchema = S.object(s =>
-   s.tag("message", `invalid byte sequence for encoding "UTF8": 0x00`)
+ let pgErrorMessageSchema = S.object(s =>
+   s.field("message", S.string)
  )

  exception PgEncodingError({table: Table.table})
@@ -228,8 +228,8 @@ function removeInvalidUtf8InPlace(entities) {
  });
  }

- var pgEncodingErrorSchema = S$RescriptSchema.object(function (s) {
-   s.tag("message", "invalid byte sequence for encoding \"UTF8\": 0x00");
+ var pgErrorMessageSchema = S$RescriptSchema.object(function (s) {
+   return s.f("message", S$RescriptSchema.string);
  });

  var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
@@ -631,7 +631,7 @@ exports.maxItemsPerQuery = maxItemsPerQuery;
  exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
  exports.chunkArray = chunkArray;
  exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
- exports.pgEncodingErrorSchema = pgEncodingErrorSchema;
+ exports.pgErrorMessageSchema = pgErrorMessageSchema;
  exports.PgEncodingError = PgEncodingError;
  exports.setQueryCache = setQueryCache;
  exports.setOrThrow = setOrThrow;
@@ -641,4 +641,4 @@ exports.cacheTablePrefixLength = cacheTablePrefixLength;
  exports.makeSchemaCacheTableInfoQuery = makeSchemaCacheTableInfoQuery;
  exports.getConnectedPsqlExec = getConnectedPsqlExec;
  exports.make = make;
- /* pgEncodingErrorSchema Not a pure module */
+ /* pgErrorMessageSchema Not a pure module */
@@ -494,15 +494,44 @@ module RollbackEnabled = {
  }
  }

- module RollbackDuration = {
-   let histogram = PromClient.Histogram.make({
-     "name": "envio_rollback_duration",
-     "help": "Rollback on reorg duration in seconds",
-     "buckets": [0.5, 1., 5., 10.],
+ module RollbackSuccess = {
+   let timeCounter = PromClient.Counter.makeCounter({
+     "name": "envio_rollback_time",
+     "help": "Rollback on reorg total time in milliseconds",
+   })
+
+   let counter = PromClient.Counter.makeCounter({
+     "name": "envio_rollback_count",
+     "help": "Number of successful rollbacks on reorg",
  })

-   let startTimer = () => {
-     histogram->PromClient.Histogram.startTimer
+   let increment = (~timeMillis: Hrtime.milliseconds) => {
+     timeCounter->PromClient.Counter.incMany(timeMillis->Hrtime.intFromMillis)
+     counter->PromClient.Counter.inc
+   }
+ }
+
+ module RollbackHistoryPrune = {
+   let entityNameLabelsSchema = S.object(s => s.field("entity", S.string))
+
+   let timeCounter = SafeCounter.makeOrThrow(
+     ~name="envio_rollback_history_prune_time",
+     ~help="The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)",
+     ~labelSchema=entityNameLabelsSchema,
+   )
+
+   let counter = SafeCounter.makeOrThrow(
+     ~name="envio_rollback_history_prune_count",
+     ~help="Number of successful entity history prunes",
+     ~labelSchema=entityNameLabelsSchema,
+   )
+
+   let increment = (~timeMillis, ~entityName) => {
+     timeCounter->SafeCounter.handleInt(
+       ~labels={entityName},
+       ~value=timeMillis->Hrtime.intFromMillis,
+     )
+     counter->SafeCounter.increment(~labels={entityName})
  }
  }

@@ -1,6 +1,7 @@
  // Generated by ReScript, PLEASE EDIT WITH CARE
  'use strict';

+ var Hrtime = require("./bindings/Hrtime.res.js");
  var Js_exn = require("rescript/lib/js/js_exn.js");
  var ChainMap = require("./ChainMap.res.js");
  var Belt_Array = require("rescript/lib/js/belt_Array.js");
@@ -600,24 +601,45 @@ var RollbackEnabled = {
  set: set$15
  };

- var histogram = new PromClient.Histogram({
-   name: "envio_rollback_duration",
-   help: "Rollback on reorg duration in seconds",
-   buckets: [
-     0.5,
-     1,
-     5,
-     10
-   ]
+ var timeCounter = new PromClient.Counter({
+   name: "envio_rollback_time",
+   help: "Rollback on reorg total time in milliseconds"
  });

- function startTimer$1() {
-   return histogram.startTimer();
+ var counter$4 = new PromClient.Counter({
+   name: "envio_rollback_count",
+   help: "Number of successful rollbacks on reorg"
+ });
+
+ function increment$3(timeMillis) {
+   timeCounter.inc(Hrtime.intFromMillis(timeMillis));
+   counter$4.inc();
+ }
+
+ var RollbackSuccess = {
+   timeCounter: timeCounter,
+   counter: counter$4,
+   increment: increment$3
+ };
+
+ var entityNameLabelsSchema = S$RescriptSchema.object(function (s) {
+   return s.f("entity", S$RescriptSchema.string);
+ });
+
+ var timeCounter$1 = makeOrThrow("envio_rollback_history_prune_time", "The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)", entityNameLabelsSchema);
+
+ var counter$5 = makeOrThrow("envio_rollback_history_prune_count", "Number of successful entity history prunes", entityNameLabelsSchema);
+
+ function increment$4(timeMillis, entityName) {
+   handleInt(timeCounter$1, entityName, Hrtime.intFromMillis(timeMillis));
+   increment(counter$5, entityName);
  }

- var RollbackDuration = {
-   histogram: histogram,
-   startTimer: startTimer$1
+ var RollbackHistoryPrune = {
+   entityNameLabelsSchema: entityNameLabelsSchema,
+   timeCounter: timeCounter$1,
+   counter: counter$5,
+   increment: increment$4
  };

  var gauge$16 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);
@@ -764,7 +786,8 @@ exports.ReorgCount = ReorgCount;
  exports.ReorgDetectionBlockNumber = ReorgDetectionBlockNumber;
  exports.ReorgThreshold = ReorgThreshold;
  exports.RollbackEnabled = RollbackEnabled;
- exports.RollbackDuration = RollbackDuration;
+ exports.RollbackSuccess = RollbackSuccess;
+ exports.RollbackHistoryPrune = RollbackHistoryPrune;
  exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
  exports.ProcessingBlockNumber = ProcessingBlockNumber;
  exports.ProcessingBatchSize = ProcessingBatchSize;
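The metric change above swaps the `envio_rollback_duration` histogram for a pair of counters: total rollback time in milliseconds plus a success count, so average rollback duration can be derived as `envio_rollback_time / envio_rollback_count`. A hedged prom-client sketch of the same pattern; the `recordRollback` wrapper and the hrtime arithmetic are illustrative assumptions, not the package's code:

```js
// Sketch only: counter-pair timing instead of a histogram.
const { Counter } = require("prom-client");

const rollbackTime = new Counter({
  name: "envio_rollback_time",
  help: "Rollback on reorg total time in milliseconds",
});
const rollbackCount = new Counter({
  name: "envio_rollback_count",
  help: "Number of successful rollbacks on reorg",
});

async function recordRollback(doRollback) {
  const start = process.hrtime.bigint();
  await doRollback();
  const elapsedMs = Number((process.hrtime.bigint() - start) / 1_000_000n);
  rollbackTime.inc(elapsedMs); // add elapsed milliseconds to the running total
  rollbackCount.inc(); // one more successful rollback
}
```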
package/src/Utils.res CHANGED
@@ -250,6 +250,8 @@ Helper to check if a value exists in an array

  let last = (arr: array<'a>): option<'a> => arr->Belt.Array.get(arr->Array.length - 1)

+ let lastUnsafe = (arr: array<'a>): 'a => arr->Belt.Array.getUnsafe(arr->Array.length - 1)
+
  let findReverseWithIndex = (arr: array<'a>, fn: 'a => bool): option<('a, int)> => {
  let rec loop = (index: int) => {
  if index < 0 {
package/src/Utils.res.js CHANGED
@@ -268,6 +268,10 @@ function last(arr) {
  return Belt_Array.get(arr, arr.length - 1 | 0);
  }

+ function lastUnsafe(arr) {
+   return arr[arr.length - 1 | 0];
+ }
+
  function findReverseWithIndex(arr, fn) {
  var _index = arr.length - 1 | 0;
  while(true) {
@@ -310,6 +314,7 @@ var $$Array$1 = {
  awaitEach: awaitEach,
  removeAtIndex: removeAtIndex,
  last: last,
+ lastUnsafe: lastUnsafe,
  findReverseWithIndex: findReverseWithIndex,
  interleave: interleave
  };
@@ -368,9 +368,20 @@ let queryRoute = Rest.route(() => {
  responses: [s => s.data(ResponseTypes.queryResponseSchema)],
  })

+ @unboxed
+ type heightResult = Value(int) | ErrorMessage(string)
+
  let heightRoute = Rest.route(() => {
  path: "/height",
  method: Get,
  input: s => s.auth(Bearer),
- responses: [s => s.field("height", S.int)],
+ responses: [
+   s =>
+     s.data(
+       S.union([
+         S.object(s => Value(s.field("height", S.int))),
+         S.string->S.shape(s => ErrorMessage(s)),
+       ]),
+     ),
+ ],
  })
@@ -247,7 +247,14 @@ function heightRoute() {
  return s.auth("Bearer");
  }),
  responses: [(function (s) {
- return s.field("height", S$RescriptSchema.$$int);
+ return s.data(S$RescriptSchema.union([
+   S$RescriptSchema.object(function (s) {
+     return s.f("height", S$RescriptSchema.$$int);
+   }),
+   S$RescriptSchema.shape(S$RescriptSchema.string, (function (s) {
+     return s;
+   }))
+ ]));
  })]
  };
  }
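With the union schema above, the `/height` route no longer decodes only `{"height": <int>}`; a plain string body is also accepted and mapped to the `ErrorMessage` case of `heightResult`. A small JavaScript sketch of distinguishing the two shapes on the client side; the decoder function and its error handling are illustrative only, not generated code:

```js
// Sketch only: classify a /height response body into the two accepted shapes.
function decodeHeightResponse(data) {
  if (typeof data === "string") {
    return { kind: "ErrorMessage", message: data };
  }
  if (data !== null && typeof data === "object" && Number.isInteger(data.height)) {
    return { kind: "Value", height: data.height };
  }
  throw new Error("Unexpected /height response shape");
}

// decodeHeightResponse({ height: 21000000 }) -> { kind: "Value", height: 21000000 }
// decodeHeightResponse("rate limited")       -> { kind: "ErrorMessage", message: "rate limited" }
```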