envio 2.27.4 → 2.27.5

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.27.4",
+ "version": "v2.27.5",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,10 +25,10 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.27.4",
- "envio-linux-arm64": "v2.27.4",
- "envio-darwin-x64": "v2.27.4",
- "envio-darwin-arm64": "v2.27.4"
+ "envio-linux-x64": "v2.27.5",
+ "envio-linux-arm64": "v2.27.5",
+ "envio-darwin-x64": "v2.27.5",
+ "envio-darwin-arm64": "v2.27.5"
  },
  "dependencies": {
  "@envio-dev/hypersync-client": "0.6.5",
@@ -93,25 +93,6 @@ let copy = (fetchState: t) => {
  }
  }

- /*
- Comapritor for two events from the same chain. No need for chain id or timestamp
- */
- let eventItemGt = (a: Internal.eventItem, b: Internal.eventItem) =>
- if a.blockNumber > b.blockNumber {
- true
- } else if a.blockNumber === b.blockNumber {
- a.logIndex > b.logIndex
- } else {
- false
- }
-
- /*
- Merges two event queues on a single event fetcher
-
- Pass the shorter list into A for better performance
- */
- let mergeSortedEventList = (a, b) => Utils.Array.mergeSorted(eventItemGt, a, b)
-
  let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
  switch (p, target) {
  | ({selection: {dependsOnAddresses: true}}, {selection: {dependsOnAddresses: true}}) => {
@@ -565,6 +546,18 @@ type query = {
  exception UnexpectedPartitionNotFound({partitionId: string})
  exception UnexpectedMergeQueryResponse({message: string})

+ /*
+ Comparitor for two events from the same chain. No need for chain id or timestamp
+ */
+ let compareBufferItem = (a: Internal.eventItem, b: Internal.eventItem) => {
+ let blockDiff = b.blockNumber - a.blockNumber
+ if blockDiff === 0 {
+ b.logIndex - a.logIndex
+ } else {
+ blockDiff
+ }
+ }
+
  /*
  Updates fetchState with a response for a given query.
  Returns Error if the partition with given query cannot be found (unexpected)
@@ -576,7 +569,7 @@ let handleQueryResult = (
  {partitions} as fetchState: t,
  ~query: query,
  ~latestFetchedBlock: blockNumberAndTimestamp,
- ~reversedNewItems,
+ ~newItems,
  ~currentBlockHeight,
  ): result<t, exn> =>
  {
@@ -633,7 +626,12 @@ let handleQueryResult = (
  fetchState->updateInternal(
  ~partitions,
  ~currentBlockHeight,
- ~queue=mergeSortedEventList(reversedNewItems, fetchState.queue),
+ ~queue=fetchState.queue
+ ->Array.concat(newItems)
+ // Theoretically it could be faster to asume that
+ // the items are sorted, but there are cases
+ // when the data source returns them unsorted
+ ->Js.Array2.sortInPlaceWith(compareBufferItem),
  )
  })

@@ -34,20 +34,6 @@ function copy(fetchState) {
  };
  }

- function eventItemGt(a, b) {
- if (a.blockNumber > b.blockNumber) {
- return true;
- } else if (a.blockNumber === b.blockNumber) {
- return a.logIndex > b.logIndex;
- } else {
- return false;
- }
- }
-
- function mergeSortedEventList(a, b) {
- return Utils.$$Array.mergeSorted(eventItemGt, a, b);
- }
-
  function mergeIntoPartition(p, target, maxAddrInPartition) {
  if (!p.selection.dependsOnAddresses) {
  return [
@@ -344,7 +330,16 @@ var UnexpectedPartitionNotFound = /* @__PURE__ */Caml_exceptions.create("FetchSt

  var UnexpectedMergeQueryResponse = /* @__PURE__ */Caml_exceptions.create("FetchState.UnexpectedMergeQueryResponse");

- function handleQueryResult(fetchState, query, latestFetchedBlock, reversedNewItems, currentBlockHeight) {
+ function compareBufferItem(a, b) {
+ var blockDiff = b.blockNumber - a.blockNumber | 0;
+ if (blockDiff === 0) {
+ return b.logIndex - a.logIndex | 0;
+ } else {
+ return blockDiff;
+ }
+ }
+
+ function handleQueryResult(fetchState, query, latestFetchedBlock, newItems, currentBlockHeight) {
  var partitions = fetchState.partitions;
  var partitionId = query.partitionId;
  var pIndex = Belt_Array.getIndexBy(partitions, (function (p) {
@@ -414,7 +409,7 @@ function handleQueryResult(fetchState, query, latestFetchedBlock, reversedNewIte
  };
  }
  return Belt_Result.map(tmp, (function (partitions) {
- return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight, mergeSortedEventList(reversedNewItems, fetchState.queue), undefined);
+ return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight, Belt_Array.concat(fetchState.queue, newItems).sort(compareBufferItem), undefined);
  }));
  }

@@ -957,8 +952,6 @@ function filterAndSortForUnorderedBatch(fetchStates, maxBatchSize) {
  }

  exports.copy = copy;
- exports.eventItemGt = eventItemGt;
- exports.mergeSortedEventList = mergeSortedEventList;
  exports.mergeIntoPartition = mergeIntoPartition;
  exports.checkIsWithinSyncRange = checkIsWithinSyncRange;
  exports.updateInternal = updateInternal;
@@ -967,6 +960,7 @@ exports.warnDifferentContractType = warnDifferentContractType;
  exports.registerDynamicContracts = registerDynamicContracts;
  exports.UnexpectedPartitionNotFound = UnexpectedPartitionNotFound;
  exports.UnexpectedMergeQueryResponse = UnexpectedMergeQueryResponse;
+ exports.compareBufferItem = compareBufferItem;
  exports.handleQueryResult = handleQueryResult;
  exports.makePartitionQuery = makePartitionQuery;
  exports.startFetchingQueries = startFetchingQueries;
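
In short, 2.27.5 stops merging a pre-sorted reversed item list into the fetch queue and instead concatenates the new items and re-sorts the whole buffer with the new compareBufferItem comparator, so a data source that returns events out of order can no longer corrupt the queue. A minimal TypeScript sketch of the comparator semantics (the types and helper below are illustrative, not the indexer's actual API):

interface EventItem {
  blockNumber: number;
  logIndex: number;
}

// Mirrors compareBufferItem from the diff: sort descending by blockNumber,
// then descending by logIndex, so the highest (newest) event ends up first.
function compareBufferItem(a: EventItem, b: EventItem): number {
  const blockDiff = b.blockNumber - a.blockNumber;
  return blockDiff === 0 ? b.logIndex - a.logIndex : blockDiff;
}

// The queue update then becomes concat + sort instead of a sorted merge.
function updateQueue(queue: EventItem[], newItems: EventItem[]): EventItem[] {
  return queue.concat(newItems).sort(compareBufferItem);
}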
@@ -241,25 +241,6 @@ module BenchmarkCounters = {
  }
  }

- module PartitionBlockFetched = {
- type labels = {chainId: int, partitionId: string}
-
- let labelSchema = S.schema(s => {
- chainId: s.matches(S.string->S.coerce(S.int)),
- partitionId: s.matches(S.string),
- })
-
- let counter = SafeGauge.makeOrThrow(
- ~name="partition_block_fetched",
- ~help="The latest fetched block number for each partition",
- ~labelSchema,
- )
-
- let set = (~blockNumber, ~partitionId, ~chainId) => {
- counter->SafeGauge.handleInt(~labels={chainId, partitionId}, ~value=blockNumber)
- }
- }
-
  let chainIdLabelsSchema = S.object(s => {
  s.field("chainId", S.string->S.coerce(S.int))
  })
@@ -440,12 +421,6 @@ module SourceGetHeightDuration = {
  }

  module ReorgCount = {
- let deprecatedCounter = PromClient.Counter.makeCounter({
- "name": "reorgs_detected",
- "help": "Total number of reorgs detected",
- "labelNames": ["chainId"],
- })
-
  let gauge = SafeGauge.makeOrThrow(
  ~name="envio_reorg_count",
  ~help="Total number of reorgs detected",
@@ -453,9 +428,6 @@ module ReorgCount = {
  )

  let increment = (~chain) => {
- deprecatedCounter
- ->PromClient.Counter.labels({"chainId": chain->ChainMap.Chain.toString})
- ->PromClient.Counter.inc
  gauge->SafeGauge.increment(~labels=chain->ChainMap.Chain.toChainId)
  }
  }
@@ -642,3 +614,79 @@ module EffectCacheCount = {
  gauge->SafeGauge.handleInt(~labels=effectName, ~value=count)
  }
  }
+
+ module StorageLoad = {
+ let operationLabelsSchema = S.object(s => s.field("operation", S.string))
+
+ let timeCounter = SafeCounter.makeOrThrow(
+ ~name="envio_storage_load_time",
+ ~help="Processing time taken to load data from storage. (milliseconds)",
+ ~labelSchema=operationLabelsSchema,
+ )
+
+ let totalTimeCounter = SafeCounter.makeOrThrow(
+ ~name="envio_storage_load_total_time",
+ ~help="Cumulative time spent loading data from storage during the indexing process. (milliseconds)",
+ ~labelSchema=operationLabelsSchema,
+ )
+
+ let counter = SafeCounter.makeOrThrow(
+ ~name="envio_storage_load_count",
+ ~help="Cumulative number of successful storage load operations during the indexing process.",
+ ~labelSchema=operationLabelsSchema,
+ )
+
+ let whereSizeCounter = SafeCounter.makeOrThrow(
+ ~name="envio_storage_load_where_size",
+ ~help="Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.",
+ ~labelSchema=operationLabelsSchema,
+ )
+
+ let sizeCounter = SafeCounter.makeOrThrow(
+ ~name="envio_storage_load_size",
+ ~help="Cumulative number of records loaded from storage during the indexing process.",
+ ~labelSchema=operationLabelsSchema,
+ )
+
+ type operationRef = {
+ mutable pendingCount: int,
+ timerRef: Hrtime.timeRef,
+ }
+ let operations = Js.Dict.empty()
+
+ let startOperation = (~operation) => {
+ switch operations->Utils.Dict.dangerouslyGetNonOption(operation) {
+ | Some(operationRef) => operationRef.pendingCount = operationRef.pendingCount + 1
+ | None =>
+ operations->Js.Dict.set(
+ operation,
+ (
+ {
+ pendingCount: 1,
+ timerRef: Hrtime.makeTimer(),
+ }: operationRef
+ ),
+ )
+ }
+ Hrtime.makeTimer()
+ }
+
+ let endOperation = (timerRef, ~operation, ~whereSize, ~size) => {
+ let operationRef = operations->Js.Dict.unsafeGet(operation)
+ operationRef.pendingCount = operationRef.pendingCount - 1
+ if operationRef.pendingCount === 0 {
+ timeCounter->SafeCounter.handleInt(
+ ~labels={operation},
+ ~value=operationRef.timerRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis,
+ )
+ operations->Utils.Dict.deleteInPlace(operation)
+ }
+ totalTimeCounter->SafeCounter.handleInt(
+ ~labels={operation},
+ ~value=timerRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis,
+ )
+ counter->SafeCounter.increment(~labels={operation})
+ whereSizeCounter->SafeCounter.handleInt(~labels={operation}, ~value=whereSize)
+ sizeCounter->SafeCounter.handleInt(~labels={operation}, ~value=size)
+ }
+ }
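
The new StorageLoad module tracks two notions of load time: envio_storage_load_total_time accumulates every call's duration, while envio_storage_load_time shares one timer per operation through a pending counter, so overlapping loads only count once toward processing time. A minimal TypeScript sketch of that pending-counter pattern (the in-memory totals and Date.now timer are illustrative stand-ins for the SafeCounter and Hrtime bindings used in the diff):

// Illustrative sketch of the shared-timer pattern used by StorageLoad.
type OperationRef = { pendingCount: number; startedAt: number };

const operations = new Map<string, OperationRef>();
let processingTimeMs = 0; // ~ envio_storage_load_time
let totalTimeMs = 0;      // ~ envio_storage_load_total_time

function startOperation(operation: string): number {
  const ref = operations.get(operation);
  if (ref) {
    ref.pendingCount += 1; // join the timer already running for this operation
  } else {
    operations.set(operation, { pendingCount: 1, startedAt: Date.now() });
  }
  return Date.now(); // per-call timer for the cumulative metric
}

function endOperation(startedAt: number, operation: string): void {
  const ref = operations.get(operation)!;
  ref.pendingCount -= 1;
  if (ref.pendingCount === 0) {
    // Only the last concurrent call closes the shared timer.
    processingTimeMs += Date.now() - ref.startedAt;
    operations.delete(operation);
  }
  totalTimeMs += Date.now() - startedAt;
}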
@@ -1,6 +1,7 @@
  // Generated by ReScript, PLEASE EDIT WITH CARE
  'use strict';

+ var Utils = require("./Utils.res.js");
  var Hrtime = require("./bindings/Hrtime.res.js");
  var Js_exn = require("rescript/lib/js/js_exn.js");
  var ChainMap = require("./ChainMap.res.js");
@@ -347,28 +348,6 @@ var BenchmarkCounters = {
  set: set$1
  };

- var labelSchema$2 = S$RescriptSchema.schema(function (s) {
- return {
- chainId: s.m(S$RescriptSchema.coerce(S$RescriptSchema.string, S$RescriptSchema.$$int)),
- partitionId: s.m(S$RescriptSchema.string)
- };
- });
-
- var counter = makeOrThrow$1("partition_block_fetched", "The latest fetched block number for each partition", labelSchema$2);
-
- function set$2(blockNumber, partitionId, chainId) {
- handleInt$1(counter, {
- chainId: chainId,
- partitionId: partitionId
- }, blockNumber);
- }
-
- var PartitionBlockFetched = {
- labelSchema: labelSchema$2,
- counter: counter,
- set: set$2
- };
-
  var chainIdLabelsSchema = S$RescriptSchema.object(function (s) {
  return s.f("chainId", S$RescriptSchema.coerce(S$RescriptSchema.string, S$RescriptSchema.$$int));
  });
@@ -379,7 +358,7 @@ var gauge$2 = makeOrThrow$1("envio_info", "Information about the indexer", S$Res
  };
  }));

- function set$3(version) {
+ function set$2(version) {
  handleInt$1(gauge$2, {
  version: version
  }, 1);
@@ -387,80 +366,80 @@ function set$3(version) {

  var Info = {
  gauge: gauge$2,
- set: set$3
+ set: set$2
  };

  var gauge$3 = makeOrThrow$1("envio_indexing_addresses", "The number of addresses indexed on chain. Includes both static and dynamic addresses.", chainIdLabelsSchema);

- function set$4(addressesCount, chainId) {
+ function set$3(addressesCount, chainId) {
  handleInt$1(gauge$3, chainId, addressesCount);
  }

  var IndexingAddresses = {
  gauge: gauge$3,
- set: set$4
+ set: set$3
  };

  var gauge$4 = makeOrThrow$1("envio_indexing_max_concurrency", "The maximum number of concurrent queries to the chain data-source.", chainIdLabelsSchema);

- function set$5(maxConcurrency, chainId) {
+ function set$4(maxConcurrency, chainId) {
  handleInt$1(gauge$4, chainId, maxConcurrency);
  }

  var IndexingMaxConcurrency = {
  gauge: gauge$4,
- set: set$5
+ set: set$4
  };

  var gauge$5 = makeOrThrow$1("envio_indexing_concurrency", "The number of executing concurrent queries to the chain data-source.", chainIdLabelsSchema);

- function set$6(concurrency, chainId) {
+ function set$5(concurrency, chainId) {
  handleInt$1(gauge$5, chainId, concurrency);
  }

  var IndexingConcurrency = {
  gauge: gauge$5,
- set: set$6
+ set: set$5
  };

  var gauge$6 = makeOrThrow$1("envio_indexing_partitions", "The number of partitions used to split fetching logic by addresses and block ranges.", chainIdLabelsSchema);

- function set$7(partitionsCount, chainId) {
+ function set$6(partitionsCount, chainId) {
  handleInt$1(gauge$6, chainId, partitionsCount);
  }

  var IndexingPartitions = {
  gauge: gauge$6,
- set: set$7
+ set: set$6
  };

- var counter$1 = makeOrThrow("envio_indexing_idle_time", "The number of milliseconds the indexer source syncing has been idle. A high value may indicate the source sync is a bottleneck.", chainIdLabelsSchema);
+ var counter = makeOrThrow("envio_indexing_idle_time", "The number of milliseconds the indexer source syncing has been idle. A high value may indicate the source sync is a bottleneck.", chainIdLabelsSchema);

  var IndexingIdleTime = {
- counter: counter$1
+ counter: counter
  };

- var counter$2 = makeOrThrow("envio_indexing_source_waiting_time", "The number of milliseconds the indexer has been waiting for new blocks.", chainIdLabelsSchema);
+ var counter$1 = makeOrThrow("envio_indexing_source_waiting_time", "The number of milliseconds the indexer has been waiting for new blocks.", chainIdLabelsSchema);

  var IndexingSourceWaitingTime = {
- counter: counter$2
+ counter: counter$1
  };

- var counter$3 = makeOrThrow("envio_indexing_query_time", "The number of milliseconds spent performing queries to the chain data-source.", chainIdLabelsSchema);
+ var counter$2 = makeOrThrow("envio_indexing_query_time", "The number of milliseconds spent performing queries to the chain data-source.", chainIdLabelsSchema);

  var IndexingQueryTime = {
- counter: counter$3
+ counter: counter$2
  };

  var gauge$7 = makeOrThrow$1("envio_indexing_buffer_size", "The current number of items in the indexing buffer.", chainIdLabelsSchema);

- function set$8(bufferSize, chainId) {
+ function set$7(bufferSize, chainId) {
  handleInt$1(gauge$7, chainId, bufferSize);
  }

  var IndexingBufferSize = {
  gauge: gauge$7,
- set: set$8
+ set: set$7
  };

  var gauge$8 = new PromClient.Gauge({
@@ -468,13 +447,13 @@ var gauge$8 = new PromClient.Gauge({
  help: "The target buffer size per chain for indexing. The actual number of items in the queue may exceed this value, but the indexer always tries to keep the buffer filled up to this target."
  });

- function set$9(targetBufferSize) {
+ function set$8(targetBufferSize) {
  gauge$8.set(targetBufferSize);
  }

  var IndexingTargetBufferSize = {
  gauge: gauge$8,
- set: set$9
+ set: set$8
  };

  var deprecatedGauge = new PromClient.Gauge({
@@ -485,7 +464,7 @@ var deprecatedGauge = new PromClient.Gauge({

  var gauge$9 = makeOrThrow$1("envio_indexing_buffer_block_number", "The highest block number that has been fully fetched by the indexer.", chainIdLabelsSchema);

- function set$10(blockNumber, chainId) {
+ function set$9(blockNumber, chainId) {
  deprecatedGauge.labels({
  chainId: chainId
  }).set(blockNumber);
@@ -495,18 +474,18 @@ function set$10(blockNumber, chainId) {
  var IndexingBufferBlockNumber = {
  deprecatedGauge: deprecatedGauge,
  gauge: gauge$9,
- set: set$10
+ set: set$9
  };

  var gauge$10 = makeOrThrow$1("envio_indexing_end_block", "The block number to stop indexing at. (inclusive)", chainIdLabelsSchema);

- function set$11(endBlock, chainId) {
+ function set$10(endBlock, chainId) {
  handleInt$1(gauge$10, chainId, endBlock);
  }

  var IndexingEndBlock = {
  gauge: gauge$10,
- set: set$11
+ set: set$10
  };

  var sourceLabelsSchema = S$RescriptSchema.schema(function (s) {
@@ -518,7 +497,7 @@ var sourceLabelsSchema = S$RescriptSchema.schema(function (s) {

  var gauge$11 = makeOrThrow$1("envio_source_height", "The latest known block number reported by the source. This value may lag behind the actual chain height, as it is updated only when queried.", sourceLabelsSchema);

- function set$12(sourceName, chainId, blockNumber) {
+ function set$11(sourceName, chainId, blockNumber) {
  handleInt$1(gauge$11, {
  source: sourceName,
  chainId: chainId
@@ -527,7 +506,7 @@ function set$12(sourceName, chainId, blockNumber) {

  var SourceHeight = {
  gauge: gauge$11,
- set: set$12
+ set: set$11
  };

  var startTimer = makeSafeHistogramOrThrow("envio_source_get_height_duration", "Duration of the source get height requests in seconds", sourceLabelsSchema, [
@@ -541,36 +520,26 @@ var SourceGetHeightDuration = {
  startTimer: startTimer
  };

- var deprecatedCounter = new PromClient.Counter({
- name: "reorgs_detected",
- help: "Total number of reorgs detected",
- labelNames: ["chainId"]
- });
-
  var gauge$12 = makeOrThrow$1("envio_reorg_count", "Total number of reorgs detected", chainIdLabelsSchema);

  function increment$2(chain) {
- deprecatedCounter.labels({
- chainId: ChainMap.Chain.toString(chain)
- }).inc();
  increment$1(gauge$12, chain);
  }

  var ReorgCount = {
- deprecatedCounter: deprecatedCounter,
  gauge: gauge$12,
  increment: increment$2
  };

  var gauge$13 = makeOrThrow$1("envio_reorg_detection_block_number", "The block number where reorg was detected the last time. This doesn't mean that the block was reorged, this is simply where we found block hash to be different.", chainIdLabelsSchema);

- function set$13(blockNumber, chain) {
+ function set$12(blockNumber, chain) {
  handleInt$1(gauge$13, chain, blockNumber);
  }

  var ReorgDetectionBlockNumber = {
  gauge: gauge$13,
- set: set$13
+ set: set$12
  };

  var gauge$14 = new PromClient.Gauge({
@@ -578,13 +547,13 @@ var gauge$14 = new PromClient.Gauge({
  help: "Whether indexing is currently within the reorg threshold"
  });

- function set$14(isInReorgThreshold) {
+ function set$13(isInReorgThreshold) {
  gauge$14.set(isInReorgThreshold ? 1 : 0);
  }

  var ReorgThreshold = {
  gauge: gauge$14,
- set: set$14
+ set: set$13
  };

  var gauge$15 = new PromClient.Gauge({
@@ -592,13 +561,13 @@ var gauge$15 = new PromClient.Gauge({
  help: "Whether rollback on reorg is enabled"
  });

- function set$15(enabled) {
+ function set$14(enabled) {
  gauge$15.set(enabled ? 1 : 0);
  }

  var RollbackEnabled = {
  gauge: gauge$15,
- set: set$15
+ set: set$14
  };

  var timeCounter = new PromClient.Counter({
@@ -606,19 +575,19 @@ var timeCounter = new PromClient.Counter({
  help: "Rollback on reorg total time in milliseconds"
  });

- var counter$4 = new PromClient.Counter({
+ var counter$3 = new PromClient.Counter({
  name: "envio_rollback_count",
  help: "Number of successful rollbacks on reorg"
  });

  function increment$3(timeMillis) {
  timeCounter.inc(Hrtime.intFromMillis(timeMillis));
- counter$4.inc();
+ counter$3.inc();
  }

  var RollbackSuccess = {
  timeCounter: timeCounter,
- counter: counter$4,
+ counter: counter$3,
  increment: increment$3
  };

@@ -628,51 +597,51 @@ var entityNameLabelsSchema = S$RescriptSchema.object(function (s) {
  });

  var timeCounter$1 = makeOrThrow("envio_rollback_history_prune_time", "The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)", entityNameLabelsSchema);

- var counter$5 = makeOrThrow("envio_rollback_history_prune_count", "Number of successful entity history prunes", entityNameLabelsSchema);
+ var counter$4 = makeOrThrow("envio_rollback_history_prune_count", "Number of successful entity history prunes", entityNameLabelsSchema);

  function increment$4(timeMillis, entityName) {
  handleInt(timeCounter$1, entityName, Hrtime.intFromMillis(timeMillis));
- increment(counter$5, entityName);
+ increment(counter$4, entityName);
  }

  var RollbackHistoryPrune = {
  entityNameLabelsSchema: entityNameLabelsSchema,
  timeCounter: timeCounter$1,
- counter: counter$5,
+ counter: counter$4,
  increment: increment$4
  };

  var gauge$16 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);

- function set$16(blockNumber, chain) {
+ function set$15(blockNumber, chain) {
  handleInt$1(gauge$16, chain, blockNumber);
  }

  var RollbackTargetBlockNumber = {
  gauge: gauge$16,
- set: set$16
+ set: set$15
  };

  var gauge$17 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);

- function set$17(blockNumber, chainId) {
+ function set$16(blockNumber, chainId) {
  handleInt$1(gauge$17, chainId, blockNumber);
  }

  var ProcessingBlockNumber = {
  gauge: gauge$17,
- set: set$17
+ set: set$16
  };

  var gauge$18 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);

- function set$18(batchSize, chainId) {
+ function set$17(batchSize, chainId) {
  handleInt$1(gauge$18, chainId, batchSize);
  }

  var ProcessingBatchSize = {
  gauge: gauge$18,
- set: set$18
+ set: set$17
  };

  var gauge$19 = new PromClient.Gauge({
@@ -680,24 +649,24 @@ var gauge$19 = new PromClient.Gauge({
  help: "The maximum number of items to process in a single batch."
  });

- function set$19(maxBatchSize) {
+ function set$18(maxBatchSize) {
  gauge$19.set(maxBatchSize);
  }

  var ProcessingMaxBatchSize = {
  gauge: gauge$19,
- set: set$19
+ set: set$18
  };

  var gauge$20 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);

- function set$20(blockNumber, chainId) {
+ function set$19(blockNumber, chainId) {
  handleInt$1(gauge$20, chainId, blockNumber);
  }

  var ProgressBlockNumber = {
  gauge: gauge$20,
- set: set$20
+ set: set$19
  };

  var deprecatedGauge$1 = new PromClient.Gauge({
@@ -708,7 +677,7 @@ var deprecatedGauge$1 = new PromClient.Gauge({

  var gauge$21 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);

- function set$21(processedCount, chainId) {
+ function set$20(processedCount, chainId) {
  deprecatedGauge$1.labels({
  chainId: chainId
  }).set(processedCount);
@@ -718,7 +687,7 @@ function set$21(processedCount, chainId) {
  var ProgressEventsCount = {
  deprecatedGauge: deprecatedGauge$1,
  gauge: gauge$21,
- set: set$21
+ set: set$20
  };

  var effectLabelsSchema = S$RescriptSchema.object(function (s) {
@@ -727,24 +696,78 @@ var effectLabelsSchema = S$RescriptSchema.object(function (s) {

  var gauge$22 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);

- function set$22(callsCount, effectName) {
+ function set$21(callsCount, effectName) {
  handleInt$1(gauge$22, effectName, callsCount);
  }

  var EffectCallsCount = {
  gauge: gauge$22,
- set: set$22
+ set: set$21
  };

  var gauge$23 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);

- function set$23(count, effectName) {
+ function set$22(count, effectName) {
  handleInt$1(gauge$23, effectName, count);
  }

  var EffectCacheCount = {
  gauge: gauge$23,
- set: set$23
+ set: set$22
+ };
+
+ var operationLabelsSchema = S$RescriptSchema.object(function (s) {
+ return s.f("operation", S$RescriptSchema.string);
+ });
+
+ var timeCounter$2 = makeOrThrow("envio_storage_load_time", "Processing time taken to load data from storage. (milliseconds)", operationLabelsSchema);
+
+ var totalTimeCounter = makeOrThrow("envio_storage_load_total_time", "Cumulative time spent loading data from storage during the indexing process. (milliseconds)", operationLabelsSchema);
+
+ var counter$5 = makeOrThrow("envio_storage_load_count", "Cumulative number of successful storage load operations during the indexing process.", operationLabelsSchema);
+
+ var whereSizeCounter = makeOrThrow("envio_storage_load_where_size", "Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.", operationLabelsSchema);
+
+ var sizeCounter = makeOrThrow("envio_storage_load_size", "Cumulative number of records loaded from storage during the indexing process.", operationLabelsSchema);
+
+ var operations = {};
+
+ function startOperation(operation) {
+ var operationRef = operations[operation];
+ if (operationRef !== undefined) {
+ operationRef.pendingCount = operationRef.pendingCount + 1 | 0;
+ } else {
+ operations[operation] = {
+ pendingCount: 1,
+ timerRef: Hrtime.makeTimer()
+ };
+ }
+ return Hrtime.makeTimer();
+ }
+
+ function endOperation(timerRef, operation, whereSize, size) {
+ var operationRef = operations[operation];
+ operationRef.pendingCount = operationRef.pendingCount - 1 | 0;
+ if (operationRef.pendingCount === 0) {
+ handleInt(timeCounter$2, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(operationRef.timerRef))));
+ Utils.Dict.deleteInPlace(operations, operation);
+ }
+ handleInt(totalTimeCounter, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timerRef))));
+ increment(counter$5, operation);
+ handleInt(whereSizeCounter, operation, whereSize);
+ handleInt(sizeCounter, operation, size);
+ }
+
+ var StorageLoad = {
+ operationLabelsSchema: operationLabelsSchema,
+ timeCounter: timeCounter$2,
+ totalTimeCounter: totalTimeCounter,
+ counter: counter$5,
+ whereSizeCounter: whereSizeCounter,
+ sizeCounter: sizeCounter,
+ operations: operations,
+ startOperation: startOperation,
+ endOperation: endOperation
  };

  exports.loadEntitiesDurationCounter = loadEntitiesDurationCounter;
@@ -765,7 +788,6 @@ exports.incrementExecuteBatchDurationCounter = incrementExecuteBatchDurationCoun
  exports.setSourceChainHeight = setSourceChainHeight;
  exports.setAllChainsSyncedToHead = setAllChainsSyncedToHead;
  exports.BenchmarkCounters = BenchmarkCounters;
- exports.PartitionBlockFetched = PartitionBlockFetched;
  exports.chainIdLabelsSchema = chainIdLabelsSchema;
  exports.Info = Info;
  exports.IndexingAddresses = IndexingAddresses;
@@ -797,4 +819,5 @@ exports.ProgressEventsCount = ProgressEventsCount;
  exports.effectLabelsSchema = effectLabelsSchema;
  exports.EffectCallsCount = EffectCallsCount;
  exports.EffectCacheCount = EffectCacheCount;
+ exports.StorageLoad = StorageLoad;
  /* loadEntitiesDurationCounter Not a pure module */
package/src/db/Table.res CHANGED
@@ -240,6 +240,8 @@ let toSqlParams = (table: table, ~schema, ~pgSchema) => {
  switch field {
  | Field(f) =>
  switch f.fieldType {
+ // The case for `BigDecimal! @config(precision: 10, scale: 8)`
+ | Custom(fieldType) if fieldType->Js.String2.startsWith("NUMERIC(") => fieldType
  | Custom(fieldType) => `${(Text :> string)}[]::"${pgSchema}".${(fieldType :> string)}`
  | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
  | fieldType => (fieldType :> string)
@@ -261,7 +261,9 @@ function toSqlParams(table, schema, pgSchema) {
  var fieldType = f.fieldType;
  tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
  fieldType === "BOOLEAN" ? "INTEGER[]::" + f.fieldType : fieldType
- ) : "TEXT[]::\"" + pgSchema + "\"." + fieldType;
+ ) : (
+ fieldType.startsWith("NUMERIC(") ? fieldType : "TEXT[]::\"" + pgSchema + "\"." + fieldType
+ );
  } else {
  tmp = "TEXT";
  }
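
The Table.res change passes a custom field type straight through when it is already a concrete SQL type, which the added comment ties to BigDecimal! @config(precision: 10, scale: 8) producing a NUMERIC(precision, scale) column; other custom types are still cast as Postgres enums. A hedged TypeScript sketch of just that branch (the function name and shape are illustrative, not the real toSqlParams):

// Illustrative only: how the SQL cast for a custom field type might be chosen.
function sqlParamCast(fieldType: string, pgSchema: string): string {
  if (fieldType.startsWith("NUMERIC(")) {
    // e.g. "NUMERIC(10, 8)" coming from BigDecimal! @config(precision: 10, scale: 8)
    return fieldType; // already a concrete SQL type, used verbatim
  }
  // other custom types are treated as enum types defined in the indexer's Postgres schema
  return `TEXT[]::"${pgSchema}".${fieldType}`;
}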