envio 2.27.3 → 2.27.5-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/index.js +1 -0
- package/package.json +5 -5
- package/src/FetchState.res +44 -26
- package/src/FetchState.res.js +34 -22
- package/src/PgStorage.res +2 -2
- package/src/PgStorage.res.js +4 -4
- package/src/Prometheus.res +112 -35
- package/src/Prometheus.res.js +142 -96
- package/src/db/Table.res +1 -0
- package/src/db/Table.res.js +3 -1
- package/src/sources/HyperSyncJsonApi.res +12 -1
- package/src/sources/HyperSyncJsonApi.res.js +8 -1
package/README.md CHANGED

```diff
@@ -12,7 +12,7 @@ HyperIndex is a fast, developer-friendly multichain indexer, optimized for both
 ## Key Features
 
 - **[Indexer auto-generation](https://docs.envio.dev/docs/HyperIndex/contract-import)** – Generate Indexers directly from smart contract addresses
-- **High performance** – Historical backfills at over
+- **High performance** – Historical backfills at over 10,000+ events per second ([fastest in market](https://docs.envio.dev/blog/indexer-benchmarking-results))
 - **Local development** – Full-featured local environment with Docker
 - **[Multichain indexing](https://docs.envio.dev/docs/HyperIndex/multichain-indexing)** – Index any EVM-compatible blockchain and Fuel (simultaneously)
 - **Real-time indexing** – Instantly track blockchain events
```
package/index.js CHANGED
package/package.json CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.27.3",
+  "version": "v2.27.5-rc.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.27.3",
-    "envio-linux-arm64": "v2.27.3",
-    "envio-darwin-x64": "v2.27.3",
-    "envio-darwin-arm64": "v2.27.3"
+    "envio-linux-x64": "v2.27.5-rc.0",
+    "envio-linux-arm64": "v2.27.5-rc.0",
+    "envio-darwin-x64": "v2.27.5-rc.0",
+    "envio-darwin-arm64": "v2.27.5-rc.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
```
package/src/FetchState.res CHANGED

```diff
@@ -93,25 +93,6 @@ let copy = (fetchState: t) => {
   }
 }
 
-/*
-Comparator for two events from the same chain. No need for chain id or timestamp
-*/
-let eventItemGt = (a: Internal.eventItem, b: Internal.eventItem) =>
-  if a.blockNumber > b.blockNumber {
-    true
-  } else if a.blockNumber === b.blockNumber {
-    a.logIndex > b.logIndex
-  } else {
-    false
-  }
-
-/*
-Merges two event queues on a single event fetcher
-
-Pass the shorter list into A for better performance
-*/
-let mergeSortedEventList = (a, b) => Utils.Array.mergeSorted(eventItemGt, a, b)
-
 let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
   switch (p, target) {
   | ({selection: {dependsOnAddresses: true}}, {selection: {dependsOnAddresses: true}}) => {
@@ -565,6 +546,18 @@ type query = {
 exception UnexpectedPartitionNotFound({partitionId: string})
 exception UnexpectedMergeQueryResponse({message: string})
 
+/*
+Comparator for two events from the same chain. No need for chain id or timestamp
+*/
+let compareBufferItem = (a: Internal.eventItem, b: Internal.eventItem) => {
+  let blockDiff = b.blockNumber - a.blockNumber
+  if blockDiff === 0 {
+    b.logIndex - a.logIndex
+  } else {
+    blockDiff
+  }
+}
+
 /*
 Updates fetchState with a response for a given query.
 Returns Error if the partition with given query cannot be found (unexpected)
@@ -576,7 +569,7 @@ let handleQueryResult = (
   {partitions} as fetchState: t,
   ~query: query,
   ~latestFetchedBlock: blockNumberAndTimestamp,
-  ~reversedNewItems,
+  ~newItems,
   ~currentBlockHeight,
 ): result<t, exn> =>
   {
@@ -633,7 +626,12 @@ let handleQueryResult = (
     fetchState->updateInternal(
       ~partitions,
       ~currentBlockHeight,
-      ~queue=
+      ~queue=fetchState.queue
+      ->Array.concat(newItems)
+      // Theoretically it could be faster to assume that
+      // the items are sorted, but there are cases
+      // when the data source returns them unsorted
+      ->Js.Array2.sortInPlaceWith(compareBufferItem),
     )
   })
 
@@ -1235,14 +1233,34 @@ let filterAndSortForUnorderedBatch = {
     }
   }
 
-  let
-  //
-
+  let hasFullBatch = ({queue, latestFullyFetchedBlock}: t, ~maxBatchSize) => {
+    // Queue is ordered from latest to earliest, so the earliest eligible
+    // item for a full batch of size B is at index (length - B).
+    // Do NOT subtract an extra 1 here; when length === B we should still
+    // classify the queue as full and probe index 0.
+    let targetBlockIdx = queue->Array.length - maxBatchSize
+    if targetBlockIdx < 0 {
+      false
+    } else {
+      // Unsafe can fail when maxBatchSize is 0,
+      // but we ignore the case
+      (queue->Js.Array2.unsafe_get(targetBlockIdx)).blockNumber <=
+        latestFullyFetchedBlock.blockNumber
+    }
   }
 
-  (fetchStates: array<t
+  (fetchStates: array<t>, ~maxBatchSize: int) => {
     fetchStates
     ->Array.keepU(hasBatchItem)
-    ->Js.Array2.sortInPlaceWith(
+    ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
+      switch (a->hasFullBatch(~maxBatchSize), b->hasFullBatch(~maxBatchSize)) {
+      | (true, true)
+      | (false, false) =>
+        // Use unsafe since we filtered out all queues without batch items
+        (a.queue->Utils.Array.lastUnsafe).timestamp - (b.queue->Utils.Array.lastUnsafe).timestamp
+      | (true, false) => -1
+      | (false, true) => 1
+      }
+    })
  }
 }
```
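The new `compareBufferItem` replaces the boolean `eventItemGt` with a signed comparator, ordering the queue from latest to earliest (it returns `b - a`), with `logIndex` as the tie-breaker inside a block. Sorting the concatenated queue trades the linear merge of two sorted lists for an O(n log n) sort, in exchange for tolerating sources that return items unsorted. A minimal standalone sketch of the resulting order (the item values are hypothetical, not from the package):

```rescript
// Standalone sketch: the comparator sorts latest-first by blockNumber,
// then by logIndex, because it returns b - a rather than a boolean.
type item = {blockNumber: int, logIndex: int}

let compare = (a: item, b: item) => {
  let blockDiff = b.blockNumber - a.blockNumber
  blockDiff === 0 ? b.logIndex - a.logIndex : blockDiff
}

let sorted =
  [{blockNumber: 9, logIndex: 7}, {blockNumber: 12, logIndex: 1}, {blockNumber: 12, logIndex: 3}]
  ->Js.Array2.sortInPlaceWith(compare)
// sorted == [{blockNumber: 12, logIndex: 3}, {blockNumber: 12, logIndex: 1}, {blockNumber: 9, logIndex: 7}]
```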
package/src/FetchState.res.js CHANGED

```diff
@@ -34,20 +34,6 @@ function copy(fetchState) {
   };
 }
 
-function eventItemGt(a, b) {
-  if (a.blockNumber > b.blockNumber) {
-    return true;
-  } else if (a.blockNumber === b.blockNumber) {
-    return a.logIndex > b.logIndex;
-  } else {
-    return false;
-  }
-}
-
-function mergeSortedEventList(a, b) {
-  return Utils.$$Array.mergeSorted(eventItemGt, a, b);
-}
-
 function mergeIntoPartition(p, target, maxAddrInPartition) {
   if (!p.selection.dependsOnAddresses) {
     return [
@@ -344,7 +330,16 @@ var UnexpectedPartitionNotFound = /* @__PURE__ */Caml_exceptions.create("FetchState.UnexpectedPartitionNotFound");
 
 var UnexpectedMergeQueryResponse = /* @__PURE__ */Caml_exceptions.create("FetchState.UnexpectedMergeQueryResponse");
 
-function handleQueryResult(fetchState, query, latestFetchedBlock, reversedNewItems, currentBlockHeight) {
+function compareBufferItem(a, b) {
+  var blockDiff = b.blockNumber - a.blockNumber | 0;
+  if (blockDiff === 0) {
+    return b.logIndex - a.logIndex | 0;
+  } else {
+    return blockDiff;
+  }
+}
+
+function handleQueryResult(fetchState, query, latestFetchedBlock, newItems, currentBlockHeight) {
   var partitions = fetchState.partitions;
   var partitionId = query.partitionId;
   var pIndex = Belt_Array.getIndexBy(partitions, (function (p) {
@@ -414,7 +409,7 @@ function handleQueryResult(fetchState, query, latestFetchedBlock, reversedNewItems, currentBlockHeight) {
   };
   }
   return Belt_Result.map(tmp, (function (partitions) {
-          return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight,
+          return updateInternal(fetchState, partitions, undefined, undefined, undefined, currentBlockHeight, Belt_Array.concat(fetchState.queue, newItems).sort(compareBufferItem), undefined);
         }));
 }
 
@@ -930,17 +925,33 @@ function hasBatchItem(param) {
   }
 }
 
-function
-
+function hasFullBatch(param, maxBatchSize) {
+  var queue = param.queue;
+  var targetBlockIdx = queue.length - maxBatchSize | 0;
+  if (targetBlockIdx < 0) {
+    return false;
+  } else {
+    return queue[targetBlockIdx].blockNumber <= param.latestFullyFetchedBlock.blockNumber;
+  }
 }
 
-function filterAndSortForUnorderedBatch(fetchStates) {
-  return Belt_Array.keepU(fetchStates, hasBatchItem).sort(
+function filterAndSortForUnorderedBatch(fetchStates, maxBatchSize) {
+  return Belt_Array.keepU(fetchStates, hasBatchItem).sort(function (a, b) {
+        var match = hasFullBatch(a, maxBatchSize);
+        var match$1 = hasFullBatch(b, maxBatchSize);
+        if (match) {
+          if (!match$1) {
+            return -1;
+          }
+          
+        } else if (match$1) {
+          return 1;
+        }
+        return Utils.$$Array.lastUnsafe(a.queue).timestamp - Utils.$$Array.lastUnsafe(b.queue).timestamp | 0;
+      });
 }
 
 exports.copy = copy;
-exports.eventItemGt = eventItemGt;
-exports.mergeSortedEventList = mergeSortedEventList;
 exports.mergeIntoPartition = mergeIntoPartition;
 exports.checkIsWithinSyncRange = checkIsWithinSyncRange;
 exports.updateInternal = updateInternal;
@@ -949,6 +960,7 @@ exports.warnDifferentContractType = warnDifferentContractType;
 exports.registerDynamicContracts = registerDynamicContracts;
 exports.UnexpectedPartitionNotFound = UnexpectedPartitionNotFound;
 exports.UnexpectedMergeQueryResponse = UnexpectedMergeQueryResponse;
+exports.compareBufferItem = compareBufferItem;
 exports.handleQueryResult = handleQueryResult;
 exports.makePartitionQuery = makePartitionQuery;
 exports.startFetchingQueries = startFetchingQueries;
```
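`hasFullBatch` depends on that latest-first ordering: a queue holds a full batch of size `maxBatchSize` exactly when the item at index `length - maxBatchSize` is already at or below the fully fetched block, and `filterAndSortForUnorderedBatch` then schedules full-batch chains ahead of the rest (falling back to the oldest last-item timestamp). A standalone sketch of the index arithmetic, with plain ints standing in for block numbers (hypothetical helper, not the package's function):

```rescript
// Standalone sketch: queue is latest-first, so the earliest item of a
// would-be batch of size n sits at index (length - n).
let hasFullBatch = (queue: array<int>, ~latestFullyFetchedBlock, ~maxBatchSize) => {
  let targetIdx = queue->Js.Array2.length - maxBatchSize
  targetIdx >= 0 && queue->Js.Array2.unsafe_get(targetIdx) <= latestFullyFetchedBlock
}

// length === maxBatchSize probes index 0, so an exactly-full queue counts:
let _ = hasFullBatch([30, 20, 10], ~latestFullyFetchedBlock=30, ~maxBatchSize=3) // true
// One item short of a batch:
let _ = hasFullBatch([30, 20], ~latestFullyFetchedBlock=30, ~maxBatchSize=3) // false
```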
package/src/PgStorage.res CHANGED

```diff
@@ -325,8 +325,8 @@ let removeInvalidUtf8InPlace = entities =>
     })
   })
 
-let
-  s.
+let pgErrorMessageSchema = S.object(s =>
+  s.field("message", S.string)
 )
 
 exception PgEncodingError({table: Table.table})
```
package/src/PgStorage.res.js CHANGED

```diff
@@ -228,8 +228,8 @@ function removeInvalidUtf8InPlace(entities) {
   });
 }
 
-var
-  s.
+var pgErrorMessageSchema = S$RescriptSchema.object(function (s) {
+  return s.f("message", S$RescriptSchema.string);
 });
 
 var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
@@ -631,7 +631,7 @@ exports.maxItemsPerQuery = maxItemsPerQuery;
 exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
 exports.chunkArray = chunkArray;
 exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
-exports.
+exports.pgErrorMessageSchema = pgErrorMessageSchema;
 exports.PgEncodingError = PgEncodingError;
 exports.setQueryCache = setQueryCache;
 exports.setOrThrow = setOrThrow;
@@ -641,4 +641,4 @@ exports.cacheTablePrefixLength = cacheTablePrefixLength;
 exports.makeSchemaCacheTableInfoQuery = makeSchemaCacheTableInfoQuery;
 exports.getConnectedPsqlExec = getConnectedPsqlExec;
 exports.make = make;
-/*
+/* pgErrorMessageSchema Not a pure module */
```
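`pgErrorMessageSchema` narrows a raw Postgres error object down to its `message` string via rescript-schema: an `S.object` whose result is a single field parses the whole object to just that field. A minimal sketch of the pattern, assuming rescript-schema's `S.parseOrThrow`; the sample payload is illustrative and the package's actual call site is not shown in this diff:

```rescript
// Standalone sketch: parses {"message": ...} down to the string itself.
let errorMessageSchema = S.object(s => s.field("message", S.string))

let raw = %raw(`{"message": "invalid byte sequence for encoding \"UTF8\""}`)
let message = raw->S.parseOrThrow(errorMessageSchema)
// message == "invalid byte sequence for encoding \"UTF8\""
```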
package/src/Prometheus.res CHANGED

```diff
@@ -241,25 +241,6 @@ module BenchmarkCounters = {
   }
 }
 
-module PartitionBlockFetched = {
-  type labels = {chainId: int, partitionId: string}
-
-  let labelSchema = S.schema(s => {
-    chainId: s.matches(S.string->S.coerce(S.int)),
-    partitionId: s.matches(S.string),
-  })
-
-  let counter = SafeGauge.makeOrThrow(
-    ~name="partition_block_fetched",
-    ~help="The latest fetched block number for each partition",
-    ~labelSchema,
-  )
-
-  let set = (~blockNumber, ~partitionId, ~chainId) => {
-    counter->SafeGauge.handleInt(~labels={chainId, partitionId}, ~value=blockNumber)
-  }
-}
-
 let chainIdLabelsSchema = S.object(s => {
   s.field("chainId", S.string->S.coerce(S.int))
 })
@@ -440,12 +421,6 @@ module SourceGetHeightDuration = {
 }
 
 module ReorgCount = {
-  let deprecatedCounter = PromClient.Counter.makeCounter({
-    "name": "reorgs_detected",
-    "help": "Total number of reorgs detected",
-    "labelNames": ["chainId"],
-  })
-
   let gauge = SafeGauge.makeOrThrow(
     ~name="envio_reorg_count",
     ~help="Total number of reorgs detected",
@@ -453,9 +428,6 @@ module ReorgCount = {
   )
 
   let increment = (~chain) => {
-    deprecatedCounter
-    ->PromClient.Counter.labels({"chainId": chain->ChainMap.Chain.toString})
-    ->PromClient.Counter.inc
     gauge->SafeGauge.increment(~labels=chain->ChainMap.Chain.toChainId)
   }
 }
@@ -494,15 +466,44 @@ module RollbackEnabled = {
   }
 }
 
-module
-  let
-    "name": "
-    "help": "Rollback on reorg
-
+module RollbackSuccess = {
+  let timeCounter = PromClient.Counter.makeCounter({
+    "name": "envio_rollback_time",
+    "help": "Rollback on reorg total time in milliseconds",
+  })
+
+  let counter = PromClient.Counter.makeCounter({
+    "name": "envio_rollback_count",
+    "help": "Number of successful rollbacks on reorg",
   })
 
-  let
-
+  let increment = (~timeMillis: Hrtime.milliseconds) => {
+    timeCounter->PromClient.Counter.incMany(timeMillis->Hrtime.intFromMillis)
+    counter->PromClient.Counter.inc
+  }
+}
+
+module RollbackHistoryPrune = {
+  let entityNameLabelsSchema = S.object(s => s.field("entity", S.string))
+
+  let timeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_rollback_history_prune_time",
+    ~help="The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)",
+    ~labelSchema=entityNameLabelsSchema,
+  )
+
+  let counter = SafeCounter.makeOrThrow(
+    ~name="envio_rollback_history_prune_count",
+    ~help="Number of successful entity history prunes",
+    ~labelSchema=entityNameLabelsSchema,
+  )
+
+  let increment = (~timeMillis, ~entityName) => {
+    timeCounter->SafeCounter.handleInt(
+      ~labels={entityName},
+      ~value=timeMillis->Hrtime.intFromMillis,
+    )
+    counter->SafeCounter.increment(~labels={entityName})
   }
 }
 
@@ -613,3 +614,79 @@ module EffectCacheCount = {
     gauge->SafeGauge.handleInt(~labels=effectName, ~value=count)
   }
 }
+
+module StorageLoad = {
+  let operationLabelsSchema = S.object(s => s.field("operation", S.string))
+
+  let timeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_storage_load_time",
+    ~help="Processing time taken to load data from storage. (milliseconds)",
+    ~labelSchema=operationLabelsSchema,
+  )
+
+  let totalTimeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_storage_load_total_time",
+    ~help="Cumulative time spent loading data from storage during the indexing process. (milliseconds)",
+    ~labelSchema=operationLabelsSchema,
+  )
+
+  let counter = SafeCounter.makeOrThrow(
+    ~name="envio_storage_load_count",
+    ~help="Cumulative number of successful storage load operations during the indexing process.",
+    ~labelSchema=operationLabelsSchema,
+  )
+
+  let whereSizeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_storage_load_where_size",
+    ~help="Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.",
+    ~labelSchema=operationLabelsSchema,
+  )
+
+  let sizeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_storage_load_size",
+    ~help="Cumulative number of records loaded from storage during the indexing process.",
+    ~labelSchema=operationLabelsSchema,
+  )
+
+  type operationRef = {
+    mutable pendingCount: int,
+    timerRef: Hrtime.timeRef,
+  }
+  let operations = Js.Dict.empty()
+
+  let startOperation = (~operation) => {
+    switch operations->Utils.Dict.dangerouslyGetNonOption(operation) {
+    | Some(operationRef) => operationRef.pendingCount = operationRef.pendingCount + 1
+    | None =>
+      operations->Js.Dict.set(
+        operation,
+        (
+          {
+            pendingCount: 1,
+            timerRef: Hrtime.makeTimer(),
+          }: operationRef
+        ),
+      )
+    }
+    Hrtime.makeTimer()
+  }
+
+  let endOperation = (timerRef, ~operation, ~whereSize, ~size) => {
+    let operationRef = operations->Js.Dict.unsafeGet(operation)
+    operationRef.pendingCount = operationRef.pendingCount - 1
+    if operationRef.pendingCount === 0 {
+      timeCounter->SafeCounter.handleInt(
+        ~labels={operation},
+        ~value=operationRef.timerRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis,
+      )
+      operations->Utils.Dict.deleteInPlace(operation)
+    }
+    totalTimeCounter->SafeCounter.handleInt(
+      ~labels={operation},
+      ~value=timerRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis,
+    )
+    counter->SafeCounter.increment(~labels={operation})
+    whereSizeCounter->SafeCounter.handleInt(~labels={operation}, ~value=whereSize)
+    sizeCounter->SafeCounter.handleInt(~labels={operation}, ~value=size)
+  }
+}
```
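`StorageLoad.startOperation` and `endOperation` come in pairs per load call. `envio_storage_load_total_time` accumulates every call's own duration, while `envio_storage_load_time` only advances when the last in-flight call for an operation finishes (`pendingCount` drops to 0), so overlapping loads are counted once, approximating wall-clock time. A hypothetical call site, where `fetchRowsFromDb` and the operation name are illustrative stand-ins, not taken from the package:

```rescript
// Hypothetical usage sketch of the StorageLoad instrumentation.
let fetchRowsFromDb = async (ids: array<string>) => ids // stand-in for the real storage query

let loadEntities = async (~ids: array<string>) => {
  let timerRef = Prometheus.StorageLoad.startOperation(~operation="Entity.get")
  let rows = await fetchRowsFromDb(ids)
  Prometheus.StorageLoad.endOperation(
    timerRef,
    ~operation="Entity.get",
    ~whereSize=ids->Js.Array2.length, // filter conditions sent to storage
    ~size=rows->Js.Array2.length, // records actually loaded
  )
  rows
}
```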
package/src/Prometheus.res.js CHANGED

```diff
@@ -1,6 +1,8 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 'use strict';
 
+var Utils = require("./Utils.res.js");
+var Hrtime = require("./bindings/Hrtime.res.js");
 var Js_exn = require("rescript/lib/js/js_exn.js");
 var ChainMap = require("./ChainMap.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
@@ -346,28 +348,6 @@ var BenchmarkCounters = {
   set: set$1
 };
 
-var labelSchema$2 = S$RescriptSchema.schema(function (s) {
-  return {
-    chainId: s.m(S$RescriptSchema.coerce(S$RescriptSchema.string, S$RescriptSchema.$$int)),
-    partitionId: s.m(S$RescriptSchema.string)
-  };
-});
-
-var counter = makeOrThrow$1("partition_block_fetched", "The latest fetched block number for each partition", labelSchema$2);
-
-function set$2(blockNumber, partitionId, chainId) {
-  handleInt$1(counter, {
-    chainId: chainId,
-    partitionId: partitionId
-  }, blockNumber);
-}
-
-var PartitionBlockFetched = {
-  labelSchema: labelSchema$2,
-  counter: counter,
-  set: set$2
-};
-
 var chainIdLabelsSchema = S$RescriptSchema.object(function (s) {
   return s.f("chainId", S$RescriptSchema.coerce(S$RescriptSchema.string, S$RescriptSchema.$$int));
 });
@@ -378,7 +358,7 @@ var gauge$2 = makeOrThrow$1("envio_info", "Information about the indexer", S$Res
   };
 }));
 
-function set$3(version) {
+function set$2(version) {
   handleInt$1(gauge$2, {
     version: version
   }, 1);
@@ -386,80 +366,80 @@ function set$3(version) {
 
 var Info = {
   gauge: gauge$2,
-  set: set$3
+  set: set$2
 };
 
 var gauge$3 = makeOrThrow$1("envio_indexing_addresses", "The number of addresses indexed on chain. Includes both static and dynamic addresses.", chainIdLabelsSchema);
 
-function set$4(addressesCount, chainId) {
+function set$3(addressesCount, chainId) {
   handleInt$1(gauge$3, chainId, addressesCount);
 }
 
 var IndexingAddresses = {
   gauge: gauge$3,
-  set: set$4
+  set: set$3
 };
 
 var gauge$4 = makeOrThrow$1("envio_indexing_max_concurrency", "The maximum number of concurrent queries to the chain data-source.", chainIdLabelsSchema);
 
-function set$5(maxConcurrency, chainId) {
+function set$4(maxConcurrency, chainId) {
   handleInt$1(gauge$4, chainId, maxConcurrency);
 }
 
 var IndexingMaxConcurrency = {
   gauge: gauge$4,
-  set: set$5
+  set: set$4
 };
 
 var gauge$5 = makeOrThrow$1("envio_indexing_concurrency", "The number of executing concurrent queries to the chain data-source.", chainIdLabelsSchema);
 
-function set$6(concurrency, chainId) {
+function set$5(concurrency, chainId) {
   handleInt$1(gauge$5, chainId, concurrency);
 }
 
 var IndexingConcurrency = {
   gauge: gauge$5,
-  set: set$6
+  set: set$5
 };
 
 var gauge$6 = makeOrThrow$1("envio_indexing_partitions", "The number of partitions used to split fetching logic by addresses and block ranges.", chainIdLabelsSchema);
 
-function set$7(partitionsCount, chainId) {
+function set$6(partitionsCount, chainId) {
   handleInt$1(gauge$6, chainId, partitionsCount);
 }
 
 var IndexingPartitions = {
   gauge: gauge$6,
-  set: set$7
+  set: set$6
 };
 
-var counter$1 = makeOrThrow("envio_indexing_idle_time", "The number of milliseconds the indexer source syncing has been idle. A high value may indicate the source sync is a bottleneck.", chainIdLabelsSchema);
+var counter = makeOrThrow("envio_indexing_idle_time", "The number of milliseconds the indexer source syncing has been idle. A high value may indicate the source sync is a bottleneck.", chainIdLabelsSchema);
 
 var IndexingIdleTime = {
-  counter: counter$1
+  counter: counter
 };
 
-var counter$2 = makeOrThrow("envio_indexing_source_waiting_time", "The number of milliseconds the indexer has been waiting for new blocks.", chainIdLabelsSchema);
+var counter$1 = makeOrThrow("envio_indexing_source_waiting_time", "The number of milliseconds the indexer has been waiting for new blocks.", chainIdLabelsSchema);
 
 var IndexingSourceWaitingTime = {
-  counter: counter$2
+  counter: counter$1
 };
 
-var counter$3 = makeOrThrow("envio_indexing_query_time", "The number of milliseconds spent performing queries to the chain data-source.", chainIdLabelsSchema);
+var counter$2 = makeOrThrow("envio_indexing_query_time", "The number of milliseconds spent performing queries to the chain data-source.", chainIdLabelsSchema);
 
 var IndexingQueryTime = {
-  counter: counter$3
+  counter: counter$2
 };
 
 var gauge$7 = makeOrThrow$1("envio_indexing_buffer_size", "The current number of items in the indexing buffer.", chainIdLabelsSchema);
 
-function set$8(bufferSize, chainId) {
+function set$7(bufferSize, chainId) {
   handleInt$1(gauge$7, chainId, bufferSize);
 }
 
 var IndexingBufferSize = {
   gauge: gauge$7,
-  set: set$8
+  set: set$7
 };
 
 var gauge$8 = new PromClient.Gauge({
@@ -467,13 +447,13 @@ var gauge$8 = new PromClient.Gauge({
   help: "The target buffer size per chain for indexing. The actual number of items in the queue may exceed this value, but the indexer always tries to keep the buffer filled up to this target."
 });
 
-function set$9(targetBufferSize) {
+function set$8(targetBufferSize) {
   gauge$8.set(targetBufferSize);
 }
 
 var IndexingTargetBufferSize = {
   gauge: gauge$8,
-  set: set$9
+  set: set$8
 };
 
 var deprecatedGauge = new PromClient.Gauge({
@@ -484,7 +464,7 @@ var deprecatedGauge = new PromClient.Gauge({
 
 var gauge$9 = makeOrThrow$1("envio_indexing_buffer_block_number", "The highest block number that has been fully fetched by the indexer.", chainIdLabelsSchema);
 
-function set$10(blockNumber, chainId) {
+function set$9(blockNumber, chainId) {
   deprecatedGauge.labels({
     chainId: chainId
   }).set(blockNumber);
@@ -494,18 +474,18 @@ function set$10(blockNumber, chainId) {
 var IndexingBufferBlockNumber = {
   deprecatedGauge: deprecatedGauge,
   gauge: gauge$9,
-  set: set$10
+  set: set$9
 };
 
 var gauge$10 = makeOrThrow$1("envio_indexing_end_block", "The block number to stop indexing at. (inclusive)", chainIdLabelsSchema);
 
-function set$11(endBlock, chainId) {
+function set$10(endBlock, chainId) {
   handleInt$1(gauge$10, chainId, endBlock);
 }
 
 var IndexingEndBlock = {
   gauge: gauge$10,
-  set: set$11
+  set: set$10
 };
 
 var sourceLabelsSchema = S$RescriptSchema.schema(function (s) {
@@ -517,7 +497,7 @@ var sourceLabelsSchema = S$RescriptSchema.schema(function (s) {
 
 var gauge$11 = makeOrThrow$1("envio_source_height", "The latest known block number reported by the source. This value may lag behind the actual chain height, as it is updated only when queried.", sourceLabelsSchema);
 
-function set$12(sourceName, chainId, blockNumber) {
+function set$11(sourceName, chainId, blockNumber) {
   handleInt$1(gauge$11, {
     source: sourceName,
     chainId: chainId
@@ -526,7 +506,7 @@ function set$12(sourceName, chainId, blockNumber) {
 
 var SourceHeight = {
   gauge: gauge$11,
-  set: set$12
+  set: set$11
 };
 
 var startTimer = makeSafeHistogramOrThrow("envio_source_get_height_duration", "Duration of the source get height requests in seconds", sourceLabelsSchema, [
@@ -540,36 +520,26 @@ var SourceGetHeightDuration = {
   startTimer: startTimer
 };
 
-var deprecatedCounter = new PromClient.Counter({
-  name: "reorgs_detected",
-  help: "Total number of reorgs detected",
-  labelNames: ["chainId"]
-});
-
 var gauge$12 = makeOrThrow$1("envio_reorg_count", "Total number of reorgs detected", chainIdLabelsSchema);
 
 function increment$2(chain) {
-  deprecatedCounter.labels({
-    chainId: ChainMap.Chain.toString(chain)
-  }).inc();
   increment$1(gauge$12, chain);
 }
 
 var ReorgCount = {
-  deprecatedCounter: deprecatedCounter,
   gauge: gauge$12,
   increment: increment$2
 };
 
 var gauge$13 = makeOrThrow$1("envio_reorg_detection_block_number", "The block number where reorg was detected the last time. This doesn't mean that the block was reorged, this is simply where we found block hash to be different.", chainIdLabelsSchema);
 
-function set$13(blockNumber, chain) {
+function set$12(blockNumber, chain) {
   handleInt$1(gauge$13, chain, blockNumber);
 }
 
 var ReorgDetectionBlockNumber = {
   gauge: gauge$13,
-  set: set$13
+  set: set$12
 };
 
 var gauge$14 = new PromClient.Gauge({
@@ -577,13 +547,13 @@ var gauge$14 = new PromClient.Gauge({
   help: "Whether indexing is currently within the reorg threshold"
 });
 
-function set$14(isInReorgThreshold) {
+function set$13(isInReorgThreshold) {
   gauge$14.set(isInReorgThreshold ? 1 : 0);
 }
 
 var ReorgThreshold = {
   gauge: gauge$14,
-  set: set$14
+  set: set$13
 };
 
 var gauge$15 = new PromClient.Gauge({
@@ -591,66 +561,87 @@ var gauge$15 = new PromClient.Gauge({
   help: "Whether rollback on reorg is enabled"
 });
 
-function set$15(enabled) {
+function set$14(enabled) {
   gauge$15.set(enabled ? 1 : 0);
 }
 
 var RollbackEnabled = {
   gauge: gauge$15,
-  set: set$15
+  set: set$14
 };
 
-var
-  name: "
-  help: "Rollback on reorg
-
-
-
-
-
-  ]
+var timeCounter = new PromClient.Counter({
+  name: "envio_rollback_time",
+  help: "Rollback on reorg total time in milliseconds"
+});
+
+var counter$3 = new PromClient.Counter({
+  name: "envio_rollback_count",
+  help: "Number of successful rollbacks on reorg"
 });
 
-function
-
+function increment$3(timeMillis) {
+  timeCounter.inc(Hrtime.intFromMillis(timeMillis));
+  counter$3.inc();
+}
+
+var RollbackSuccess = {
+  timeCounter: timeCounter,
+  counter: counter$3,
+  increment: increment$3
+};
+
+var entityNameLabelsSchema = S$RescriptSchema.object(function (s) {
+  return s.f("entity", S$RescriptSchema.string);
+});
+
+var timeCounter$1 = makeOrThrow("envio_rollback_history_prune_time", "The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)", entityNameLabelsSchema);
+
+var counter$4 = makeOrThrow("envio_rollback_history_prune_count", "Number of successful entity history prunes", entityNameLabelsSchema);
+
+function increment$4(timeMillis, entityName) {
+  handleInt(timeCounter$1, entityName, Hrtime.intFromMillis(timeMillis));
+  increment(counter$4, entityName);
 }
 
-var
-
-
+var RollbackHistoryPrune = {
+  entityNameLabelsSchema: entityNameLabelsSchema,
+  timeCounter: timeCounter$1,
+  counter: counter$4,
+  increment: increment$4
 };
 
 var gauge$16 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);
 
-function set$16(blockNumber, chain) {
+function set$15(blockNumber, chain) {
   handleInt$1(gauge$16, chain, blockNumber);
 }
 
 var RollbackTargetBlockNumber = {
   gauge: gauge$16,
-  set: set$16
+  set: set$15
 };
 
 var gauge$17 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);
 
-function set$17(blockNumber, chainId) {
+function set$16(blockNumber, chainId) {
   handleInt$1(gauge$17, chainId, blockNumber);
 }
 
 var ProcessingBlockNumber = {
   gauge: gauge$17,
-  set: set$17
+  set: set$16
 };
 
 var gauge$18 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);
 
-function set$18(batchSize, chainId) {
+function set$17(batchSize, chainId) {
   handleInt$1(gauge$18, chainId, batchSize);
 }
 
 var ProcessingBatchSize = {
   gauge: gauge$18,
-  set: set$18
+  set: set$17
 };
 
 var gauge$19 = new PromClient.Gauge({
@@ -658,24 +649,24 @@ var gauge$19 = new PromClient.Gauge({
   help: "The maximum number of items to process in a single batch."
 });
 
-function set$19(maxBatchSize) {
+function set$18(maxBatchSize) {
   gauge$19.set(maxBatchSize);
 }
 
 var ProcessingMaxBatchSize = {
   gauge: gauge$19,
-  set: set$19
+  set: set$18
 };
 
 var gauge$20 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);
 
-function set$20(blockNumber, chainId) {
+function set$19(blockNumber, chainId) {
   handleInt$1(gauge$20, chainId, blockNumber);
 }
 
 var ProgressBlockNumber = {
   gauge: gauge$20,
-  set: set$20
+  set: set$19
 };
 
 var deprecatedGauge$1 = new PromClient.Gauge({
@@ -686,7 +677,7 @@ var deprecatedGauge$1 = new PromClient.Gauge({
 
 var gauge$21 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);
 
-function set$21(processedCount, chainId) {
+function set$20(processedCount, chainId) {
   deprecatedGauge$1.labels({
     chainId: chainId
   }).set(processedCount);
@@ -696,7 +687,7 @@ function set$21(processedCount, chainId) {
 var ProgressEventsCount = {
   deprecatedGauge: deprecatedGauge$1,
   gauge: gauge$21,
-  set: set$21
+  set: set$20
 };
 
 var effectLabelsSchema = S$RescriptSchema.object(function (s) {
@@ -705,24 +696,78 @@ var effectLabelsSchema = S$RescriptSchema.object(function (s) {
 
 var gauge$22 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);
 
-function set$22(callsCount, effectName) {
+function set$21(callsCount, effectName) {
   handleInt$1(gauge$22, effectName, callsCount);
 }
 
 var EffectCallsCount = {
   gauge: gauge$22,
-  set: set$22
+  set: set$21
 };
 
 var gauge$23 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);
 
-function set$23(count, effectName) {
+function set$22(count, effectName) {
   handleInt$1(gauge$23, effectName, count);
 }
 
 var EffectCacheCount = {
   gauge: gauge$23,
-  set: set$23
+  set: set$22
+};
+
+var operationLabelsSchema = S$RescriptSchema.object(function (s) {
+  return s.f("operation", S$RescriptSchema.string);
+});
+
+var timeCounter$2 = makeOrThrow("envio_storage_load_time", "Processing time taken to load data from storage. (milliseconds)", operationLabelsSchema);
+
+var totalTimeCounter = makeOrThrow("envio_storage_load_total_time", "Cumulative time spent loading data from storage during the indexing process. (milliseconds)", operationLabelsSchema);
+
+var counter$5 = makeOrThrow("envio_storage_load_count", "Cumulative number of successful storage load operations during the indexing process.", operationLabelsSchema);
+
+var whereSizeCounter = makeOrThrow("envio_storage_load_where_size", "Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.", operationLabelsSchema);
+
+var sizeCounter = makeOrThrow("envio_storage_load_size", "Cumulative number of records loaded from storage during the indexing process.", operationLabelsSchema);
+
+var operations = {};
+
+function startOperation(operation) {
+  var operationRef = operations[operation];
+  if (operationRef !== undefined) {
+    operationRef.pendingCount = operationRef.pendingCount + 1 | 0;
+  } else {
+    operations[operation] = {
+      pendingCount: 1,
+      timerRef: Hrtime.makeTimer()
+    };
+  }
+  return Hrtime.makeTimer();
+}
+
+function endOperation(timerRef, operation, whereSize, size) {
+  var operationRef = operations[operation];
+  operationRef.pendingCount = operationRef.pendingCount - 1 | 0;
+  if (operationRef.pendingCount === 0) {
+    handleInt(timeCounter$2, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(operationRef.timerRef))));
+    Utils.Dict.deleteInPlace(operations, operation);
+  }
+  handleInt(totalTimeCounter, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timerRef))));
+  increment(counter$5, operation);
+  handleInt(whereSizeCounter, operation, whereSize);
+  handleInt(sizeCounter, operation, size);
+}
+
+var StorageLoad = {
+  operationLabelsSchema: operationLabelsSchema,
+  timeCounter: timeCounter$2,
+  totalTimeCounter: totalTimeCounter,
+  counter: counter$5,
+  whereSizeCounter: whereSizeCounter,
+  sizeCounter: sizeCounter,
+  operations: operations,
+  startOperation: startOperation,
+  endOperation: endOperation
 };
 
 exports.loadEntitiesDurationCounter = loadEntitiesDurationCounter;
@@ -743,7 +788,6 @@ exports.incrementExecuteBatchDurationCounter = incrementExecuteBatchDurationCounter;
 exports.setSourceChainHeight = setSourceChainHeight;
 exports.setAllChainsSyncedToHead = setAllChainsSyncedToHead;
 exports.BenchmarkCounters = BenchmarkCounters;
-exports.PartitionBlockFetched = PartitionBlockFetched;
 exports.chainIdLabelsSchema = chainIdLabelsSchema;
 exports.Info = Info;
 exports.IndexingAddresses = IndexingAddresses;
@@ -764,7 +808,8 @@ exports.ReorgCount = ReorgCount;
 exports.ReorgDetectionBlockNumber = ReorgDetectionBlockNumber;
 exports.ReorgThreshold = ReorgThreshold;
 exports.RollbackEnabled = RollbackEnabled;
-exports.
+exports.RollbackSuccess = RollbackSuccess;
+exports.RollbackHistoryPrune = RollbackHistoryPrune;
 exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
 exports.ProcessingBlockNumber = ProcessingBlockNumber;
 exports.ProcessingBatchSize = ProcessingBatchSize;
@@ -774,4 +819,5 @@ exports.ProgressEventsCount = ProgressEventsCount;
 exports.effectLabelsSchema = effectLabelsSchema;
 exports.EffectCallsCount = EffectCallsCount;
 exports.EffectCacheCount = EffectCacheCount;
+exports.StorageLoad = StorageLoad;
 /* loadEntitiesDurationCounter Not a pure module */
```
package/src/db/Table.res CHANGED

```diff
@@ -240,6 +240,7 @@ let toSqlParams = (table: table, ~schema, ~pgSchema) => {
       switch field {
       | Field(f) =>
         switch f.fieldType {
+        | Custom(fieldType) if fieldType->Js.String2.startsWith("NUMERIC(") => fieldType
        | Custom(fieldType) => `${(Text :> string)}[]::"${pgSchema}".${(fieldType :> string)}`
        | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
        | fieldType => (fieldType :> string)
```
package/src/db/Table.res.js CHANGED

```diff
@@ -261,7 +261,9 @@ function toSqlParams(table, schema, pgSchema) {
     var fieldType = f.fieldType;
     tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
         fieldType === "BOOLEAN" ? "INTEGER[]::" + f.fieldType : fieldType
-      ) :
+      ) : (
+        fieldType.startsWith("NUMERIC(") ? fieldType : "TEXT[]::\"" + pgSchema + "\"." + fieldType
+      );
   } else {
     tmp = "TEXT";
   }
```
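The new guard keeps precision-qualified numerics intact: a `Custom` field type such as `NUMERIC(78, 0)` is a built-in Postgres type spelled with parentheses, so routing it through the enum branch (which casts as `TEXT[]::"<schema>".<type>`) would produce invalid SQL. A standalone sketch of just this branch, with plain strings standing in for the field-type variants:

```rescript
// Standalone sketch of the fieldType -> SQL parameter type mapping.
let toParamType = (~fieldType: string, ~pgSchema: string) =>
  if fieldType->Js.String2.startsWith("NUMERIC(") {
    fieldType // built-in parameterized type: pass through unchanged
  } else {
    `TEXT[]::"${pgSchema}".${fieldType}` // user-defined enum types live in the indexer schema
  }

let _ = toParamType(~fieldType="NUMERIC(78, 0)", ~pgSchema="public") // "NUMERIC(78, 0)"
let _ = toParamType(~fieldType="accounttype", ~pgSchema="public") // `TEXT[]::"public".accounttype`
```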
package/src/sources/HyperSyncJsonApi.res CHANGED

```diff
@@ -368,9 +368,20 @@ let queryRoute = Rest.route(() => {
   responses: [s => s.data(ResponseTypes.queryResponseSchema)],
 })
 
+@unboxed
+type heightResult = Value(int) | ErrorMessage(string)
+
 let heightRoute = Rest.route(() => {
   path: "/height",
   method: Get,
   input: s => s.auth(Bearer),
-  responses: [
+  responses: [
+    s =>
+      s.data(
+        S.union([
+          S.object(s => Value(s.field("height", S.int))),
+          S.string->S.shape(s => ErrorMessage(s)),
+        ]),
+      ),
+  ],
 })
```
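The `/height` route now decodes two payload shapes through a single `@unboxed` variant: a JSON object carrying the block height, or a bare string carrying an error message. A minimal sketch of how such a union behaves, shown as an illustration of the rescript-schema pattern (the sample payloads are hypothetical):

```rescript
// Standalone sketch: one schema, two accepted payload shapes.
@unboxed
type heightResult = Value(int) | ErrorMessage(string)

let heightResultSchema = S.union([
  S.object(s => Value(s.field("height", S.int))), // {"height": n} -> Value(n)
  S.string->S.shape(s => ErrorMessage(s)), // "some error" -> ErrorMessage("some error")
])
// {"height": 1234567}        -> Value(1234567)
// "height temporarily unavailable" -> ErrorMessage("height temporarily unavailable")
```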
package/src/sources/HyperSyncJsonApi.res.js CHANGED

```diff
@@ -247,7 +247,14 @@ function heightRoute() {
       return s.auth("Bearer");
     }),
     responses: [(function (s) {
-          return s.
+          return s.data(S$RescriptSchema.union([
+                S$RescriptSchema.object(function (s) {
+                      return s.f("height", S$RescriptSchema.$$int);
+                    }),
+                S$RescriptSchema.shape(S$RescriptSchema.string, (function (s) {
+                        return s;
+                      }))
+              ]));
         })]
   };
 }
```