envio 2.30.1 → 2.31.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/Internal.gen.ts +2 -0
- package/src/Internal.res +13 -1
- package/src/PgStorage.res +2 -2
- package/src/PgStorage.res.js +1 -1
- package/src/sources/RpcSource.res +13 -6
- package/src/sources/RpcSource.res.js +19 -5
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.30.1",
+  "version": "v2.31.0-alpha.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.30.1",
-    "envio-linux-arm64": "v2.30.1",
-    "envio-darwin-x64": "v2.30.1",
-    "envio-darwin-arm64": "v2.30.1"
+    "envio-linux-x64": "v2.31.0-alpha.0",
+    "envio-linux-arm64": "v2.31.0-alpha.0",
+    "envio-darwin-x64": "v2.31.0-alpha.0",
+    "envio-darwin-arm64": "v2.31.0-alpha.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.6",
package/src/Internal.gen.ts
CHANGED

@@ -42,6 +42,8 @@ export type entityHandlerContext<entity> = {
   readonly deleteUnsafe: (_1:string) => void
 };
 
+export type chainInfo = { readonly isReady: boolean };
+
 export type genericHandlerWithLoader<loader,handler,eventFilters> = {
   readonly loader: loader;
   readonly handler: handler;
package/src/Internal.res
CHANGED

@@ -54,8 +54,20 @@ type entityHandlerContext<'entity> = {
   deleteUnsafe: string => unit,
 }
 
+@genType
+type chainInfo = {
+  // true when the chain has completed initial sync and is processing live events
+  // false during historical synchronization
+  isReady: bool,
+}
+
+type chains = dict<chainInfo>
+
 type loaderReturn
-type handlerContext = private {
+type handlerContext = private {
+  isPreload: bool,
+  chains: chains,
+}
 type handlerArgs = {
   event: event,
   context: handlerContext,
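Taken together, the Internal.gen.ts and Internal.res changes expose per-chain sync status (a chainInfo record with isReady) plus an isPreload flag on the handler context. The sketch below shows how a handler might consume those fields. It is illustrative only: the diff confirms the generated chainInfo type, but the public context shape and the assumption that chains is keyed by the chain id as a string are not confirmed here.

    // Matches the generated type added in Internal.gen.ts.
    type chainInfo = { readonly isReady: boolean };

    // Hypothetical handler-context shape for illustration only;
    // the real generated context type may differ.
    type HandlerContext = {
      isPreload: boolean;
      chains: Record<string, chainInfo>;
    };

    function shouldRunExpensiveSideEffect(context: HandlerContext, chainId: number): boolean {
      // Assumption: `chains` is keyed by the chain id rendered as a string.
      const chain = context.chains[String(chainId)];
      // Skip side effects while the chain is still in historical sync
      // (isReady is false) or while the handler runs in preload mode.
      return chain !== undefined && chain.isReady && !context.isPreload;
    }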
package/src/PgStorage.res
CHANGED

@@ -566,8 +566,8 @@ let make = (
 
   let isInitialized = async () => {
     let envioTables = await sql->Postgres.unsafe(
-      `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${// This is for indexer before envio@2.28
-      "event_sync_state"}' OR table_name = '${InternalTable.Chains.table.tableName}';`,
+      `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND (table_name = '${// This is for indexer before envio@2.28
+      "event_sync_state"}' OR table_name = '${InternalTable.Chains.table.tableName}');`,
     )
     envioTables->Utils.Array.notEmpty
   }
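The only change here is a pair of parentheses in the isInitialized query, but it matters: SQL's AND binds more tightly than OR, so the old WHERE clause grouped as (schema matches AND legacy table) OR (chains table in any schema), meaning a table with the chains table's name in any schema would mark the indexer as initialized. Below is a small TypeScript mirror of the precedence difference, with hypothetical truth values chosen only for illustration:

    // Boolean mirror of the WHERE clause fix: AND binds tighter than OR.
    const schemaMatches = false;   // table_schema = '${pgSchema}'
    const isLegacyTable = false;   // table_name = 'event_sync_state'
    const isChainsTable = true;    // table_name = InternalTable.Chains.table.tableName

    // Old query: groups as (schemaMatches AND isLegacyTable) OR isChainsTable,
    // so a chains table in any schema satisfied the check.
    const oldResult = (schemaMatches && isLegacyTable) || isChainsTable; // true

    // New query: the explicit parentheses scope both table names to the target schema.
    const newResult = schemaMatches && (isLegacyTable || isChainsTable); // false

    console.log({ oldResult, newResult });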
package/src/PgStorage.res.js
CHANGED

@@ -394,7 +394,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
   };
   var cacheDirPath = Path.resolve("..", ".envio", "cache");
   var isInitialized = async function () {
-    var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND table_name = 'event_sync_state' OR table_name = '" + InternalTable.Chains.table.tableName + "';");
+    var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND (table_name = 'event_sync_state' OR table_name = '" + InternalTable.Chains.table.tableName + "');");
     return Utils.$$Array.notEmpty(envioTables);
   };
   var restoreEffectCache = async function (withUpload) {
package/src/sources/RpcSource.res
CHANGED

@@ -27,7 +27,7 @@ let rec getKnownBlockWithBackoff = async (
   switch await getKnownBlock(provider, blockNumber) {
   | exception err =>
     Logging.warn({
-      "err": err,
+      "err": err->Utils.prettifyExn,
       "msg": `Issue while running fetching batch of events from the RPC. Will wait ${backoffMsOnFailure->Belt.Int.toString}ms and try again.`,
       "source": sourceName,
       "chainId": chain->ChainMap.Chain.toChainId,
@@ -47,10 +47,17 @@ let rec getKnownBlockWithBackoff = async (
     // NOTE: this is wasteful if these fields are not selected in the users config.
     // There might be a better way to do this based on the block schema.
     // However this is not extremely expensive and good enough for now (only on rpc sync also).
-
-
+
+    {
+      ...result,
+      // Mutation would be cheaper,
+      // BUT "result" is an Ethers.js Block object,
+      // which has the fields as readonly.
+      miner: result.miner->Address.Evm.fromAddressLowercaseOrThrow,
+    }
+  } else {
+    result
   }
-  result
 }
 let getSuggestedBlockIntervalFromExn = {
   // Unknown provider: "retry with the range 123-456"
@@ -500,7 +507,7 @@ let make = (
       ~loaderFn=transactionHash => provider->Ethers.JsonRpcProvider.getTransaction(~transactionHash),
       ~onError=(am, ~exn) => {
         Logging.error({
-          "err": exn,
+          "err": exn->Utils.prettifyExn,
           "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
           ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
           "source": name,
@@ -527,7 +534,7 @@ let make = (
       ),
       ~onError=(am, ~exn) => {
         Logging.error({
-          "err": exn,
+          "err": exn->Utils.prettifyExn,
           "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
           ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
           "source": name,
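The lowercaseAddresses branch now builds a new block record instead of mutating the one returned by the RPC client, because the Ethers.js Block object exposes its fields as read-only (the compiled JS below copies every field explicitly for the same reason). Here is a minimal TypeScript sketch of the copy-and-override pattern, with illustrative types and a hypothetical address helper standing in for Address.Evm.fromAddressLowercaseOrThrow:

    // Illustrative types only; not the real Ethers.js Block shape.
    type Block = { readonly miner: string; readonly number: number };

    // Hypothetical stand-in for Address.Evm.fromAddressLowercaseOrThrow.
    const toLowercaseAddressOrThrow = (addr: string): string => {
      if (!/^0x[0-9a-fA-F]{40}$/.test(addr)) throw new Error(`Invalid address: ${addr}`);
      return addr.toLowerCase();
    };

    function normalizeBlock(block: Block, lowercaseAddresses: boolean): Block {
      if (!lowercaseAddresses) return block;
      // `block.miner = ...` would be rejected by the compiler (readonly field),
      // so copy the block and override the one field instead.
      return { ...block, miner: toLowercaseAddressOrThrow(block.miner) };
    }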
package/src/sources/RpcSource.res.js
CHANGED

@@ -48,7 +48,7 @@ async function getKnownBlockWithBackoff(provider, sourceName, chain, blockNumber
   catch (raw_err){
     var err = Caml_js_exceptions.internalToOCamlException(raw_err);
     Logging.warn({
-      err: err,
+      err: Utils.prettifyExn(err),
       msg: "Issue while running fetching batch of events from the RPC. Will wait " + String(backoffMsOnFailure) + "ms and try again.",
       source: sourceName,
       chainId: chain,
@@ -58,9 +58,23 @@ async function getKnownBlockWithBackoff(provider, sourceName, chain, blockNumber
     return await getKnownBlockWithBackoff(provider, sourceName, chain, blockNumber, (backoffMsOnFailure << 1), lowercaseAddresses);
   }
   if (lowercaseAddresses) {
-
+    return {
+      _difficulty: result._difficulty,
+      difficulty: result.difficulty,
+      extraData: result.extraData,
+      gasLimit: result.gasLimit,
+      gasUsed: result.gasUsed,
+      hash: result.hash,
+      miner: Address.Evm.fromAddressLowercaseOrThrow(result.miner),
+      nonce: result.nonce,
+      number: result.number,
+      parentHash: result.parentHash,
+      timestamp: result.timestamp,
+      transactions: result.transactions
+    };
+  } else {
+    return result;
   }
-  return result;
 }
 
 var suggestedRangeRegExp = /retry with the range (\d+)-(\d+)/;
@@ -509,7 +523,7 @@ function make(param) {
             return provider.getTransaction(transactionHash);
           }), (function (am, exn) {
             Logging.error({
-              err: exn,
+              err: Utils.prettifyExn(exn),
               msg: "EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in " + String(am._retryDelayMillis / 1000 | 0) + " seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the \"suggestedFix\" in the metadata of this command",
               source: name,
              chainId: chain,
@@ -523,7 +537,7 @@ function make(param) {
             return getKnownBlockWithBackoff(provider, name, chain, blockNumber, 1000, lowercaseAddresses);
           }), (function (am, exn) {
             Logging.error({
-              err: exn,
+              err: Utils.prettifyExn(exn),
               msg: "EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in " + String(am._retryDelayMillis / 1000 | 0) + " seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the \"suggestedFix\" in the metadata of this command",
              source: name,
              chainId: chain,
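Both RpcSource files also route caught exceptions through Utils.prettifyExn before logging them. That helper's implementation is not part of this diff; the sketch below only illustrates the general idea its call sites suggest, namely normalizing an arbitrary thrown value into something a structured logger can render:

    // Illustrative only: not the real Utils.prettifyExn, whose code is not shown here.
    function prettifyExnSketch(exn: unknown): { name: string; message: string; stack?: string } {
      if (exn instanceof Error) {
        // Preserve the useful parts of a real Error for structured logging.
        return { name: exn.name, message: exn.message, stack: exn.stack };
      }
      // Non-Error throwables (strings, objects) are stringified instead.
      return { name: "UnknownError", message: String(exn) };
    }

    // Usage at a log site, mirroring the updated calls:
    // Logging.warn({ err: prettifyExnSketch(err), msg: "...", source, chainId });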