envio 2.27.3 → 2.27.4
This diff compares publicly available package versions as published to their public registries. It is provided for informational purposes only.
- package/README.md +1 -1
- package/index.js +1 -0
- package/package.json +5 -5
- package/src/FetchState.res +25 -5
- package/src/FetchState.res.js +22 -4
- package/src/PgStorage.res +2 -2
- package/src/PgStorage.res.js +4 -4
- package/src/Prometheus.res +36 -7
- package/src/Prometheus.res.js +38 -15
- package/src/sources/HyperSyncJsonApi.res +12 -1
- package/src/sources/HyperSyncJsonApi.res.js +8 -1
package/README.md
CHANGED
@@ -12,7 +12,7 @@ HyperIndex is a fast, developer-friendly multichain indexer, optimized for both
 ## Key Features
 
 - **[Indexer auto-generation](https://docs.envio.dev/docs/HyperIndex/contract-import)** – Generate Indexers directly from smart contract addresses
-- **High performance** – Historical backfills at over
+- **High performance** – Historical backfills at over 10,000+ events per second ([fastest in market](https://docs.envio.dev/blog/indexer-benchmarking-results))
 - **Local development** – Full-featured local environment with Docker
 - **[Multichain indexing](https://docs.envio.dev/docs/HyperIndex/multichain-indexing)** – Index any EVM-compatible blockchain and Fuel (simultaneously)
 - **Real-time indexing** – Instantly track blockchain events
package/index.js
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.27.3",
+  "version": "v2.27.4",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.27.3",
-    "envio-linux-arm64": "v2.27.3",
-    "envio-darwin-x64": "v2.27.3",
-    "envio-darwin-arm64": "v2.27.3"
+    "envio-linux-x64": "v2.27.4",
+    "envio-linux-arm64": "v2.27.4",
+    "envio-darwin-x64": "v2.27.4",
+    "envio-darwin-arm64": "v2.27.4"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/FetchState.res
CHANGED
@@ -1235,14 +1235,34 @@ let filterAndSortForUnorderedBatch = {
     }
   }
 
-  let
-  //
-
+  let hasFullBatch = ({queue, latestFullyFetchedBlock}: t, ~maxBatchSize) => {
+    // Queue is ordered from latest to earliest, so the earliest eligible
+    // item for a full batch of size B is at index (length - B).
+    // Do NOT subtract an extra 1 here; when length === B we should still
+    // classify the queue as full and probe index 0.
+    let targetBlockIdx = queue->Array.length - maxBatchSize
+    if targetBlockIdx < 0 {
+      false
+    } else {
+      // Unsafe can fail when maxBatchSize is 0,
+      // but we ignore the case
+      (queue->Js.Array2.unsafe_get(targetBlockIdx)).blockNumber <=
+        latestFullyFetchedBlock.blockNumber
+    }
   }
 
-  (fetchStates: array<t
+  (fetchStates: array<t>, ~maxBatchSize: int) => {
     fetchStates
     ->Array.keepU(hasBatchItem)
-    ->Js.Array2.sortInPlaceWith(
+    ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
+      switch (a->hasFullBatch(~maxBatchSize), b->hasFullBatch(~maxBatchSize)) {
+      | (true, true)
+      | (false, false) =>
+        // Use unsafe since we filtered out all queues without batch items
+        (a.queue->Utils.Array.lastUnsafe).timestamp - (b.queue->Utils.Array.lastUnsafe).timestamp
+      | (true, false) => -1
+      | (false, true) => 1
+      }
+    })
   }
 }
package/src/FetchState.res.js
CHANGED
@@ -930,12 +930,30 @@ function hasBatchItem(param) {
   }
 }
 
-function
-
+function hasFullBatch(param, maxBatchSize) {
+  var queue = param.queue;
+  var targetBlockIdx = queue.length - maxBatchSize | 0;
+  if (targetBlockIdx < 0) {
+    return false;
+  } else {
+    return queue[targetBlockIdx].blockNumber <= param.latestFullyFetchedBlock.blockNumber;
+  }
 }
 
-function filterAndSortForUnorderedBatch(fetchStates) {
-  return Belt_Array.keepU(fetchStates, hasBatchItem).sort(
+function filterAndSortForUnorderedBatch(fetchStates, maxBatchSize) {
+  return Belt_Array.keepU(fetchStates, hasBatchItem).sort(function (a, b) {
+        var match = hasFullBatch(a, maxBatchSize);
+        var match$1 = hasFullBatch(b, maxBatchSize);
+        if (match) {
+          if (!match$1) {
+            return -1;
+          }
+
+        } else if (match$1) {
+          return 1;
+        }
+        return Utils.$$Array.lastUnsafe(a.queue).timestamp - Utils.$$Array.lastUnsafe(b.queue).timestamp | 0;
+      });
 }
 
 exports.copy = copy;
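Taken together, the FetchState changes make the unordered-batch scheduler prefer partitions whose queue already holds a full batch, and only then fall back to ordering by the oldest queued item's timestamp. A minimal standalone sketch of that comparator, using plain objects instead of envio's internal fetch-state type (the field names mirror the diff; everything else here is illustrative only):

```js
// Sketch of the ordering rule introduced above, under the assumption that a
// "state" is { queue, latestFullyFetchedBlock } with queue ordered latest-first.
function hasFullBatch(state, maxBatchSize) {
  // A full batch of size B starts at index (length - B) in a latest-first queue.
  var targetBlockIdx = state.queue.length - maxBatchSize;
  if (targetBlockIdx < 0) {
    return false;
  }
  return state.queue[targetBlockIdx].blockNumber <= state.latestFullyFetchedBlock.blockNumber;
}

function sortForUnorderedBatch(states, maxBatchSize) {
  return states.slice().sort(function (a, b) {
    var aFull = hasFullBatch(a, maxBatchSize);
    var bFull = hasFullBatch(b, maxBatchSize);
    if (aFull !== bFull) {
      // States that can already serve a full batch come first.
      return aFull ? -1 : 1;
    }
    // Tie-break by the earliest queued item (the queue's last element).
    return a.queue[a.queue.length - 1].timestamp - b.queue[b.queue.length - 1].timestamp;
  });
}
```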
package/src/PgStorage.res
CHANGED
@@ -325,8 +325,8 @@ let removeInvalidUtf8InPlace = entities =>
   })
 })
 
-let
-  s.
+let pgErrorMessageSchema = S.object(s =>
+  s.field("message", S.string)
 )
 
 exception PgEncodingError({table: Table.table})
package/src/PgStorage.res.js
CHANGED
@@ -228,8 +228,8 @@ function removeInvalidUtf8InPlace(entities) {
   });
 }
 
-var
-  s.
+var pgErrorMessageSchema = S$RescriptSchema.object(function (s) {
+      return s.f("message", S$RescriptSchema.string);
     });
 
 var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
@@ -631,7 +631,7 @@ exports.maxItemsPerQuery = maxItemsPerQuery;
 exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
 exports.chunkArray = chunkArray;
 exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
-exports.
+exports.pgErrorMessageSchema = pgErrorMessageSchema;
 exports.PgEncodingError = PgEncodingError;
 exports.setQueryCache = setQueryCache;
 exports.setOrThrow = setOrThrow;
@@ -641,4 +641,4 @@ exports.cacheTablePrefixLength = cacheTablePrefixLength;
 exports.makeSchemaCacheTableInfoQuery = makeSchemaCacheTableInfoQuery;
 exports.getConnectedPsqlExec = getConnectedPsqlExec;
 exports.make = make;
-/*
+/* pgErrorMessageSchema Not a pure module */
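The only functional addition here is pgErrorMessageSchema, which decodes the `message` string out of whatever value the Postgres driver throws; how that message is then used is not shown in this diff. A hypothetical plain-JS equivalent, purely to illustrate the shape being parsed (the helper name is not part of envio's API):

```js
// Hypothetical equivalent of pgErrorMessageSchema: safely pull the "message"
// string out of an unknown value thrown by the Postgres driver.
function getPgErrorMessage(error) {
  if (error !== null && typeof error === "object" && typeof error.message === "string") {
    return error.message;
  }
  return undefined;
}

// Example:
// getPgErrorMessage(new Error('invalid byte sequence for encoding "UTF8"'))
// => 'invalid byte sequence for encoding "UTF8"'
```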
package/src/Prometheus.res
CHANGED
@@ -494,15 +494,44 @@ module RollbackEnabled = {
   }
 }
 
-module
-  let
-    "name": "
-    "help": "Rollback on reorg
-
+module RollbackSuccess = {
+  let timeCounter = PromClient.Counter.makeCounter({
+    "name": "envio_rollback_time",
+    "help": "Rollback on reorg total time in milliseconds",
+  })
+
+  let counter = PromClient.Counter.makeCounter({
+    "name": "envio_rollback_count",
+    "help": "Number of successful rollbacks on reorg",
   })
 
-  let
-
+  let increment = (~timeMillis: Hrtime.milliseconds) => {
+    timeCounter->PromClient.Counter.incMany(timeMillis->Hrtime.intFromMillis)
+    counter->PromClient.Counter.inc
+  }
+}
+
+module RollbackHistoryPrune = {
+  let entityNameLabelsSchema = S.object(s => s.field("entity", S.string))
+
+  let timeCounter = SafeCounter.makeOrThrow(
+    ~name="envio_rollback_history_prune_time",
+    ~help="The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)",
+    ~labelSchema=entityNameLabelsSchema,
+  )
+
+  let counter = SafeCounter.makeOrThrow(
+    ~name="envio_rollback_history_prune_count",
+    ~help="Number of successful entity history prunes",
+    ~labelSchema=entityNameLabelsSchema,
+  )
+
+  let increment = (~timeMillis, ~entityName) => {
+    timeCounter->SafeCounter.handleInt(
+      ~labels={entityName},
+      ~value=timeMillis->Hrtime.intFromMillis,
+    )
+    counter->SafeCounter.increment(~labels={entityName})
   }
 }
 
package/src/Prometheus.res.js
CHANGED
@@ -1,6 +1,7 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 'use strict';
 
+var Hrtime = require("./bindings/Hrtime.res.js");
 var Js_exn = require("rescript/lib/js/js_exn.js");
 var ChainMap = require("./ChainMap.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
@@ -600,24 +601,45 @@ var RollbackEnabled = {
   set: set$15
 };
 
-var
-  name: "
-  help: "Rollback on reorg
-  buckets: [
-    0.5,
-    1,
-    5,
-    10
-  ]
+var timeCounter = new PromClient.Counter({
+      name: "envio_rollback_time",
+      help: "Rollback on reorg total time in milliseconds"
     });
 
-
-
+var counter$4 = new PromClient.Counter({
+      name: "envio_rollback_count",
+      help: "Number of successful rollbacks on reorg"
+    });
+
+function increment$3(timeMillis) {
+  timeCounter.inc(Hrtime.intFromMillis(timeMillis));
+  counter$4.inc();
+}
+
+var RollbackSuccess = {
+  timeCounter: timeCounter,
+  counter: counter$4,
+  increment: increment$3
+};
+
+var entityNameLabelsSchema = S$RescriptSchema.object(function (s) {
+      return s.f("entity", S$RescriptSchema.string);
+    });
+
+var timeCounter$1 = makeOrThrow("envio_rollback_history_prune_time", "The total time spent pruning entity history which is not in the reorg threshold. (milliseconds)", entityNameLabelsSchema);
+
+var counter$5 = makeOrThrow("envio_rollback_history_prune_count", "Number of successful entity history prunes", entityNameLabelsSchema);
+
+function increment$4(timeMillis, entityName) {
+  handleInt(timeCounter$1, entityName, Hrtime.intFromMillis(timeMillis));
+  increment(counter$5, entityName);
 }
 
-var
-
-
+var RollbackHistoryPrune = {
+  entityNameLabelsSchema: entityNameLabelsSchema,
+  timeCounter: timeCounter$1,
+  counter: counter$5,
+  increment: increment$4
 };
 
 var gauge$16 = makeOrThrow$1("envio_rollback_target_block_number", "The block number reorg was rollbacked to the last time.", chainIdLabelsSchema);
@@ -764,7 +786,8 @@ exports.ReorgCount = ReorgCount;
 exports.ReorgDetectionBlockNumber = ReorgDetectionBlockNumber;
 exports.ReorgThreshold = ReorgThreshold;
 exports.RollbackEnabled = RollbackEnabled;
-exports.
+exports.RollbackSuccess = RollbackSuccess;
+exports.RollbackHistoryPrune = RollbackHistoryPrune;
 exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
 exports.ProcessingBlockNumber = ProcessingBlockNumber;
 exports.ProcessingBatchSize = ProcessingBatchSize;
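The compiled output shows two new plain counters plus an increment helper for the RollbackSuccess module. A sketch of equivalent metric definitions, assuming the prom-client npm package (which the PromClient binding in this diff appears to wrap); the metric names and help strings come from the diff, while the surrounding wiring is illustrative:

```js
// Sketch of the new rollback metrics using prom-client (assumption: PromClient
// in the diff binds to this package).
var client = require("prom-client");

var rollbackTime = new client.Counter({
  name: "envio_rollback_time",
  help: "Rollback on reorg total time in milliseconds",
});

var rollbackCount = new client.Counter({
  name: "envio_rollback_count",
  help: "Number of successful rollbacks on reorg",
});

// Called once per successful rollback with the elapsed wall-clock time.
function recordRollbackSuccess(timeMillis) {
  rollbackTime.inc(timeMillis); // accumulates total milliseconds spent rolling back
  rollbackCount.inc();          // counts successful rollbacks
}
```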
package/src/sources/HyperSyncJsonApi.res
CHANGED
@@ -368,9 +368,20 @@ let queryRoute = Rest.route(() => {
   responses: [s => s.data(ResponseTypes.queryResponseSchema)],
 })
 
+@unboxed
+type heightResult = Value(int) | ErrorMessage(string)
+
 let heightRoute = Rest.route(() => {
   path: "/height",
   method: Get,
   input: s => s.auth(Bearer),
-  responses: [
+  responses: [
+    s =>
+      s.data(
+        S.union([
+          S.object(s => Value(s.field("height", S.int))),
+          S.string->S.shape(s => ErrorMessage(s)),
+        ]),
+      ),
+  ],
 })
package/src/sources/HyperSyncJsonApi.res.js
CHANGED
@@ -247,7 +247,14 @@ function heightRoute() {
       return s.auth("Bearer");
     }),
     responses: [(function (s) {
-          return s.
+          return s.data(S$RescriptSchema.union([
+                          S$RescriptSchema.object(function (s) {
+                                return s.f("height", S$RescriptSchema.$$int);
+                              }),
+                          S$RescriptSchema.shape(S$RescriptSchema.string, (function (s) {
+                                  return s;
+                                }))
+                        ]));
       })]
   };
 }