envio 2.27.6 → 2.28.0-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/rescript.json +3 -0
- package/src/FetchState.res +21 -14
- package/src/FetchState.res.js +14 -5
- package/src/Hasura.res +31 -12
- package/src/Hasura.res.js +31 -13
- package/src/Internal.res +7 -4
- package/src/InternalConfig.res +20 -0
- package/src/InternalConfig.res.js +2 -0
- package/src/Js.shim.ts +11 -0
- package/src/LoadManager.res +12 -6
- package/src/LoadManager.res.js +13 -6
- package/src/Persistence.res +25 -33
- package/src/Persistence.res.js +18 -20
- package/src/PgStorage.res +162 -102
- package/src/PgStorage.res.js +146 -103
- package/src/Prometheus.res +2 -2
- package/src/Prometheus.res.js +2 -3
- package/src/bindings/Pino.res +1 -1
- package/src/bindings/Pino.res.js +2 -1
- package/src/bindings/Postgres.res +1 -1
- package/src/db/EntityHistory.res +18 -17
- package/src/db/EntityHistory.res.js +28 -26
- package/src/db/InternalTable.gen.ts +43 -0
- package/src/db/InternalTable.res +430 -0
- package/src/db/InternalTable.res.js +315 -0
- package/src/vendored/Rest.res +11 -2
- package/src/vendored/Rest.res.js +44 -35
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.27.6",
+  "version": "v2.28.0-alpha.2",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.27.6",
-    "envio-linux-arm64": "v2.27.6",
-    "envio-darwin-x64": "v2.27.6",
-    "envio-darwin-arm64": "v2.27.6"
+    "envio-linux-x64": "v2.28.0-alpha.2",
+    "envio-linux-arm64": "v2.28.0-alpha.2",
+    "envio-darwin-x64": "v2.28.0-alpha.2",
+    "envio-darwin-arm64": "v2.28.0-alpha.2"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/rescript.json
CHANGED
package/src/FetchState.res
CHANGED
@@ -818,20 +818,27 @@ let getNextQuery = (
     if (
       p->checkIsFetchingPartition->not && p.latestFetchedBlock.blockNumber < maxQueryBlockNumber
     ) {
-      switch …
+      let endBlock = switch blockLag {
+      | 0 => endBlock
+      | _ =>
+        switch endBlock {
+        | Some(endBlock) => Some(Pervasives.min(headBlock, endBlock))
+        // Force head block as an endBlock when blockLag is set
+        // because otherwise HyperSync might return bigger range
+        | None => Some(headBlock)
+        }
+      }
+      // Enforce the response range up until target block
+      // Otherwise for indexers with 100+ partitions
+      // we might blow up the buffer size to more than 600k events
+      // simply because of HyperSync returning extra blocks
+      let endBlock = switch (endBlock, maxQueryBlockNumber < currentBlockHeight) {
+      | (Some(endBlock), true) => Some(Pervasives.min(maxQueryBlockNumber, endBlock))
+      | (None, true) => Some(maxQueryBlockNumber)
+      | (_, false) => endBlock
+      }
+
+      switch p->makePartitionQuery(~indexingContracts, ~endBlock, ~mergeTarget) {
       | Some(q) => queries->Array.push(q)
       | None => ()
       }
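
The two added switches are easier to read outside ReScript's pattern syntax. A minimal TypeScript sketch of the same clamping logic (function and parameter names are illustrative, not the package's API):

    // Mirrors the new endBlock handling in getNextQuery (illustrative names).
    function clampEndBlock(
      endBlock: number | undefined,
      blockLag: number,
      headBlock: number,
      maxQueryBlockNumber: number,
      currentBlockHeight: number,
    ): number | undefined {
      // Step 1: when blockLag is set, force the lagged head block as an upper
      // bound, because HyperSync might otherwise return a bigger range.
      if (blockLag !== 0) {
        endBlock = endBlock === undefined ? headBlock : Math.min(headBlock, endBlock);
      }
      // Step 2: cap the range at the partition's target block so indexers with
      // 100+ partitions don't blow up the event buffer with extra blocks.
      if (maxQueryBlockNumber < currentBlockHeight) {
        endBlock =
          endBlock === undefined ? maxQueryBlockNumber : Math.min(maxQueryBlockNumber, endBlock);
      }
      return endBlock;
    }

The compiled FetchState.res.js below is the mechanical ReScript output of the same cascade (endBlock$1 is step 1, endBlock$2 is step 2).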
package/src/FetchState.res.js
CHANGED
@@ -571,11 +571,20 @@ function getNextQuery(param, concurrencyLimit, targetBufferSize, currentBlockHei
   if (!(!checkIsFetchingPartition(p) && p.latestFetchedBlock.blockNumber < maxQueryBlockNumber)) {
     return ;
   }
-  var …
+  var endBlock$1 = blockLag !== 0 ? (
+      endBlock !== undefined ? (
+          headBlock < endBlock ? headBlock : endBlock
+        ) : headBlock
+    ) : endBlock;
+  var match = maxQueryBlockNumber < currentBlockHeight;
+  var endBlock$2 = endBlock$1 !== undefined ? (
+      match ? (
+          maxQueryBlockNumber < endBlock$1 ? maxQueryBlockNumber : endBlock$1
+        ) : endBlock$1
+    ) : (
+      match ? maxQueryBlockNumber : endBlock$1
+    );
+  var q = makePartitionQuery(p, indexingContracts, endBlock$2, mergeTarget);
   if (q !== undefined) {
     queries.push(q);
     return ;
package/src/Hasura.res
CHANGED
@@ -222,24 +222,43 @@ let trackDatabase = async (
   ~endpoint,
   ~auth,
   ~pgSchema,
-  ~allStaticTables,
-  ~allEntityTables,
+  ~userEntities: array<Internal.entityConfig>,
   ~aggregateEntities,
   ~responseLimit,
   ~schema,
 ) => {
+  let trackOnlyInternalTableNames = [
+    InternalTable.Chains.table.tableName,
+    InternalTable.EventSyncState.table.tableName,
+    InternalTable.PersistedState.table.tableName,
+    InternalTable.EndOfBlockRangeScannedData.table.tableName,
+    InternalTable.DynamicContractRegistry.table.tableName,
+  ]
+  let exposedInternalTableNames = [
+    InternalTable.RawEvents.table.tableName,
+    InternalTable.Views.metaViewName,
+    InternalTable.Views.chainMetadataViewName,
+  ]
+  let userTableNames = userEntities->Js.Array2.map(entity => entity.table.tableName)
+
   Logging.info("Tracking tables in Hasura")

   let _ = await clearHasuraMetadata(~endpoint, ~auth)
-  let tableNames =
-    [allStaticTables, allEntityTables]
-    ->Belt.Array.concatMany
-    ->Js.Array2.map(({tableName}: Table.table) => tableName)

-  await trackTables(…
+  await trackTables(
+    ~endpoint,
+    ~auth,
+    ~pgSchema,
+    ~tableNames=[
+      exposedInternalTableNames,
+      trackOnlyInternalTableNames,
+      userTableNames,
+    ]->Belt.Array.concatMany,
+  )

   let _ =
-    await …
+    await [exposedInternalTableNames, userTableNames]
+    ->Belt.Array.concatMany
     ->Js.Array2.map(tableName =>
       createSelectPermissions(
         ~endpoint,
@@ -251,11 +270,11 @@ let trackDatabase = async (
       )
     )
     ->Js.Array2.concatMany(
-      allEntityTables->Js.Array2.map(table => {
-        let {tableName} = table
+      userEntities->Js.Array2.map(entityConfig => {
+        let {tableName} = entityConfig.table
         [
           //Set array relationships
-          table
+          entityConfig.table
           ->Table.getDerivedFromFields
           ->Js.Array2.map(derivedFromField => {
             //determines the actual name of the underlying relational field (if it's an entity mapping then suffixes _id for eg.)
@@ -275,7 +294,7 @@ let trackDatabase = async (
             )
           }),
           //Set object relationships
-          table
+          entityConfig.table
           ->Table.getLinkedEntityFields
           ->Js.Array2.map(((field, linkedEntityName)) => {
             createEntityRelationship(
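
The intent of the three name groups: every table is tracked in Hasura, but select permissions and relationships are only set up for user entities plus the exposed internal tables and views. A rough TypeScript sketch of the split (names are hypothetical):

    // Illustrative sketch of the Hasura tracking split, not the package's API.
    interface TableGroups {
      trackOnly: string[]; // internal tables tracked but not exposed to clients
      exposed: string[];   // internal tables/views that also get select permissions
      user: string[];      // one table per user-defined entity
    }

    const tablesToTrack = (g: TableGroups): string[] =>
      [...g.exposed, ...g.trackOnly, ...g.user];

    const tablesWithSelectPermissions = (g: TableGroups): string[] =>
      [...g.exposed, ...g.user];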
package/src/Hasura.res.js
CHANGED
@@ -7,6 +7,7 @@ var Utils = require("./Utils.res.js");
 var Schema = require("./db/Schema.res.js");
 var Logging = require("./Logging.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
+var InternalTable = require("./db/InternalTable.res.js");
 var Caml_splice_call = require("rescript/lib/js/caml_splice_call.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
@@ -203,26 +204,43 @@ async function createEntityRelationship(pgSchema, endpoint, auth, tableName, rel
   }
 }

-async function trackDatabase(endpoint, auth, pgSchema, …
+async function trackDatabase(endpoint, auth, pgSchema, userEntities, aggregateEntities, responseLimit, schema) {
+  var trackOnlyInternalTableNames = [
+    InternalTable.Chains.table.tableName,
+    InternalTable.EventSyncState.table.tableName,
+    InternalTable.PersistedState.table.tableName,
+    InternalTable.EndOfBlockRangeScannedData.table.tableName,
+    InternalTable.DynamicContractRegistry.table.tableName
+  ];
+  var exposedInternalTableNames = [
+    InternalTable.RawEvents.table.tableName,
+    InternalTable.Views.metaViewName,
+    InternalTable.Views.chainMetadataViewName
+  ];
+  var userTableNames = userEntities.map(function (entity) {
+    return entity.table.tableName;
+  });
   Logging.info("Tracking tables in Hasura");
   await clearHasuraMetadata(endpoint, auth);
-  …
+  await trackTables(endpoint, auth, pgSchema, Belt_Array.concatMany([
+        exposedInternalTableNames,
+        trackOnlyInternalTableNames,
+        userTableNames
+      ]));
+  await Promise.all(Caml_splice_call.spliceObjApply(Belt_Array.concatMany([
+            exposedInternalTableNames,
+            userTableNames
+          ]).map(function (tableName) {
           return createSelectPermissions(auth, endpoint, tableName, pgSchema, responseLimit, aggregateEntities);
-        }), "concat", […
-  var …
+        }), "concat", [userEntities.map(function (entityConfig) {
+          var match = entityConfig.table;
+          var tableName = match.tableName;
           return [
-            Table.getDerivedFromFields(table).map(function (derivedFromField) {
+            Table.getDerivedFromFields(entityConfig.table).map(function (derivedFromField) {
               var relationalFieldName = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(schema, derivedFromField));
               return createEntityRelationship(pgSchema, endpoint, auth, tableName, "array", relationalFieldName, derivedFromField.fieldName, derivedFromField.derivedFromEntity, true);
             }),
-            Table.getLinkedEntityFields(table).map(function (param) {
+            Table.getLinkedEntityFields(entityConfig.table).map(function (param) {
              var field = param[0];
              return createEntityRelationship(pgSchema, endpoint, auth, tableName, "object", field.fieldName, field.fieldName, param[1], false);
            })
package/src/Internal.res
CHANGED
@@ -176,13 +176,16 @@ let fuelTransferParamsSchema = S.schema(s => {
 })

 type entity = private {id: string}
-type entityConfig = {
+type genericEntityConfig<'entity> = {
   name: string,
-  schema: S.t<entity>,
-  rowsSchema: S.t<array<entity>>,
+  schema: S.t<'entity>,
+  rowsSchema: S.t<array<'entity>>,
   table: Table.table,
-  entityHistory: EntityHistory.t<entity>,
+  entityHistory: EntityHistory.t<'entity>,
 }
+type entityConfig = genericEntityConfig<entity>
+external fromGenericEntityConfig: genericEntityConfig<'entity> => entityConfig = "%identity"

 type enum
 type enumConfig<'enum> = {
   name: string,
package/src/InternalConfig.res
ADDED
@@ -0,0 +1,20 @@
+// TODO: rename the file to Config.res after finishing the migration from codegen
+// And turn it into PublicConfig instead
+// For internal use we should create Indexer.res with a stateful type
+
+type contract = {
+  name: string,
+  abi: EvmTypes.Abi.t,
+  addresses: array<Address.t>,
+  events: array<Internal.eventConfig>,
+  startBlock: option<int>,
+}
+
+type chain = {
+  id: int,
+  startBlock: int,
+  endBlock?: int,
+  confirmedBlockThreshold: int,
+  contracts: array<contract>,
+  sources: array<Source.t>,
+}
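
Internal.res now parameterizes the entity config over its entity type and erases the parameter with a zero-cost %identity cast. The same shape in TypeScript terms (a simplified sketch for orientation, not the package's generated bindings):

    // Sketch of genericEntityConfig<'entity> / entityConfig (simplified).
    type Entity = { readonly id: string };

    interface GenericEntityConfig<E extends Entity> {
      name: string;
      parse: (raw: unknown) => E;       // stands in for schema: S.t<'entity>
      parseRows: (raw: unknown) => E[]; // stands in for rowsSchema
    }

    type EntityConfig = GenericEntityConfig<Entity>;

    // ReScript's `external ... = "%identity"` compiles to a no-op:
    const fromGenericEntityConfig = <E extends Entity>(
      c: GenericEntityConfig<E>,
    ): EntityConfig => c;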
package/src/Js.shim.ts
ADDED
package/src/LoadManager.res
CHANGED
@@ -66,9 +66,10 @@ let schedule = async loadManager => {
     }
   })

-  if inputsToLoad->Utils.Array.isEmpty->not {
+  let isSuccess = if inputsToLoad->Utils.Array.isEmpty->not {
     try {
       await group.load(inputsToLoad)
+      true
     } catch {
     | exn => {
         let exn = exn->Utils.prettifyExn
@@ -76,16 +77,21 @@ let schedule = async loadManager => {
         let call = calls->Js.Dict.unsafeGet(inputKey)
         call.reject(exn)
       })
+        false
     }
   }
+  } else {
+    true
   }

   if currentInputKeys->Utils.Array.isEmpty->not {
-    …
+    if isSuccess {
+      currentInputKeys->Js.Array2.forEach(inputKey => {
+        let call = calls->Js.Dict.unsafeGet(inputKey)
+        calls->Utils.Dict.deleteInPlace(inputKey)
+        call.resolve(group.getUnsafeInMemory(inputKey))
+      })
+    }

     // Clean up executed batch to reset
     // provided load function which
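
The new isSuccess flag makes the resolution step explicit: when the batch load throws, every pending call is rejected in the catch branch, so values are only resolved from the in-memory store after a successful load. A condensed TypeScript sketch of the control flow (all shapes hypothetical):

    // Condensed sketch of the success gating in LoadManager.schedule.
    type Call = { resolve: (v: unknown) => void; reject: (e: unknown) => void };

    async function runBatch(
      inputsToLoad: string[],
      currentInputKeys: string[],
      calls: Map<string, Call>,
      load: (inputs: string[]) => Promise<void>,
      getUnsafeInMemory: (key: string) => unknown,
    ): Promise<void> {
      let isSuccess = true;
      if (inputsToLoad.length > 0) {
        try {
          await load(inputsToLoad);
        } catch (exn) {
          // The whole batch failed: reject every pending call once.
          for (const key of currentInputKeys) calls.get(key)?.reject(exn);
          isSuccess = false;
        }
      }
      if (!isSuccess) return;
      // Resolve from the in-memory store only when the load succeeded.
      for (const key of currentInputKeys) {
        const call = calls.get(key);
        calls.delete(key);
        call?.resolve(getUnsafeInMemory(key));
      }
    }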
package/src/LoadManager.res.js
CHANGED
@@ -40,9 +40,13 @@ async function schedule(loadManager) {
     }

   });
-  …
+  var isSuccess;
+  if (Utils.$$Array.isEmpty(inputsToLoad)) {
+    isSuccess = true;
+  } else {
     try {
       await group.load(inputsToLoad);
+      isSuccess = true;
     }
     catch (raw_exn){
       var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
@@ -51,16 +55,19 @@ async function schedule(loadManager) {
         var call = calls[inputKey];
         call.reject(exn$1);
       }));
+      isSuccess = false;
     }
   }
   if (Utils.$$Array.isEmpty(currentInputKeys)) {
     return ;
   }
-  …
+  if (isSuccess) {
+    currentInputKeys.forEach(function (inputKey) {
+      var call = calls[inputKey];
+      Utils.Dict.deleteInPlace(calls, inputKey);
+      call.resolve(group.getUnsafeInMemory(inputKey));
+    });
+  }
   var latestGroup = groups[key];
   if (Utils.$$Array.isEmpty(Object.keys(latestGroup.calls))) {
     return Utils.Dict.deleteInPlace(groups, key);
package/src/Persistence.res
CHANGED
|
@@ -13,6 +13,12 @@ type effectCacheRecord = {
|
|
|
13
13
|
mutable count: int,
|
|
14
14
|
}
|
|
15
15
|
|
|
16
|
+
type initialState = {
|
|
17
|
+
cleanRun: bool,
|
|
18
|
+
cache: dict<effectCacheRecord>,
|
|
19
|
+
chains: array<InternalTable.Chains.t>,
|
|
20
|
+
}
|
|
21
|
+
|
|
16
22
|
type operator = [#">" | #"="]
|
|
17
23
|
|
|
18
24
|
type storage = {
|
|
@@ -22,10 +28,11 @@ type storage = {
|
|
|
22
28
|
// Should initialize the storage so we can start interacting with it
|
|
23
29
|
// Eg create connection, schema, tables, etc.
|
|
24
30
|
initialize: (
|
|
31
|
+
~chainConfigs: array<InternalConfig.chain>=?,
|
|
25
32
|
~entities: array<Internal.entityConfig>=?,
|
|
26
|
-
~generalTables: array<Table.table>=?,
|
|
27
33
|
~enums: array<Internal.enumConfig<Internal.enum>>=?,
|
|
28
|
-
) => promise<
|
|
34
|
+
) => promise<initialState>,
|
|
35
|
+
loadInitialState: unit => promise<initialState>,
|
|
29
36
|
@raises("StorageError")
|
|
30
37
|
loadByIdsOrThrow: 'item. (
|
|
31
38
|
~ids: array<string>,
|
|
@@ -55,10 +62,6 @@ type storage = {
|
|
|
55
62
|
) => promise<unit>,
|
|
56
63
|
// This is to download cache from the database to .envio/cache
|
|
57
64
|
dumpEffectCache: unit => promise<unit>,
|
|
58
|
-
// This is not good, but the function does two things:
|
|
59
|
-
// - Gets info about existing cache tables
|
|
60
|
-
// - if withUpload is true, it also populates the cache from .envio/cache to the database
|
|
61
|
-
restoreEffectCache: (~withUpload: bool) => promise<array<effectCacheRecord>>,
|
|
62
65
|
}
|
|
63
66
|
|
|
64
67
|
exception StorageError({message: string, reason: exn})
|
|
@@ -66,11 +69,10 @@ exception StorageError({message: string, reason: exn})
|
|
|
66
69
|
type storageStatus =
|
|
67
70
|
| Unknown
|
|
68
71
|
| Initializing(promise<unit>)
|
|
69
|
-
| Ready(
|
|
72
|
+
| Ready(initialState)
|
|
70
73
|
|
|
71
74
|
type t = {
|
|
72
75
|
userEntities: array<Internal.entityConfig>,
|
|
73
|
-
staticTables: array<Table.table>,
|
|
74
76
|
allEntities: array<Internal.entityConfig>,
|
|
75
77
|
allEnums: array<Internal.enumConfig<Internal.enum>>,
|
|
76
78
|
mutable storageStatus: storageStatus,
|
|
@@ -86,18 +88,15 @@ let entityHistoryActionEnumConfig: Internal.enumConfig<EntityHistory.RowAction.t
|
|
|
86
88
|
|
|
87
89
|
let make = (
|
|
88
90
|
~userEntities,
|
|
89
|
-
~dcRegistryEntityConfig,
|
|
90
91
|
// TODO: Should only pass userEnums and create internal config in runtime
|
|
91
92
|
~allEnums,
|
|
92
|
-
~staticTables,
|
|
93
93
|
~storage,
|
|
94
94
|
) => {
|
|
95
|
-
let allEntities = userEntities->Js.Array2.concat([
|
|
95
|
+
let allEntities = userEntities->Js.Array2.concat([InternalTable.DynamicContractRegistry.config])
|
|
96
96
|
let allEnums =
|
|
97
97
|
allEnums->Js.Array2.concat([entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig])
|
|
98
98
|
{
|
|
99
99
|
userEntities,
|
|
100
|
-
staticTables,
|
|
101
100
|
allEntities,
|
|
102
101
|
allEnums,
|
|
103
102
|
storageStatus: Unknown,
|
|
@@ -106,17 +105,7 @@ let make = (
|
|
|
106
105
|
}
|
|
107
106
|
|
|
108
107
|
let init = {
|
|
109
|
-
|
|
110
|
-
let effectCacheRecords = await persistence.storage.restoreEffectCache(~withUpload)
|
|
111
|
-
let cache = Js.Dict.empty()
|
|
112
|
-
effectCacheRecords->Js.Array2.forEach(record => {
|
|
113
|
-
Prometheus.EffectCacheCount.set(~count=record.count, ~effectName=record.effectName)
|
|
114
|
-
cache->Js.Dict.set(record.effectName, record)
|
|
115
|
-
})
|
|
116
|
-
cache
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
async (persistence, ~reset=false) => {
|
|
108
|
+
async (persistence, ~chainConfigs, ~reset=false) => {
|
|
120
109
|
try {
|
|
121
110
|
let shouldRun = switch persistence.storageStatus {
|
|
122
111
|
| Unknown => true
|
|
@@ -135,17 +124,14 @@ let init = {
|
|
|
135
124
|
if reset || !(await persistence.storage.isInitialized()) {
|
|
136
125
|
Logging.info(`Initializing the indexer storage...`)
|
|
137
126
|
|
|
138
|
-
await persistence.storage.initialize(
|
|
127
|
+
let initialState = await persistence.storage.initialize(
|
|
139
128
|
~entities=persistence.allEntities,
|
|
140
|
-
~generalTables=persistence.staticTables,
|
|
141
129
|
~enums=persistence.allEnums,
|
|
130
|
+
~chainConfigs,
|
|
142
131
|
)
|
|
143
132
|
|
|
144
133
|
Logging.info(`The indexer storage is ready. Uploading cache...`)
|
|
145
|
-
persistence.storageStatus = Ready(
|
|
146
|
-
cleanRun: true,
|
|
147
|
-
cache: await loadInitialCache(persistence, ~withUpload=true),
|
|
148
|
-
})
|
|
134
|
+
persistence.storageStatus = Ready(initialState)
|
|
149
135
|
} else if (
|
|
150
136
|
// In case of a race condition,
|
|
151
137
|
// we want to set the initial status to Ready only once.
|
|
@@ -155,10 +141,7 @@ let init = {
|
|
|
155
141
|
}
|
|
156
142
|
) {
|
|
157
143
|
Logging.info(`The indexer storage is ready.`)
|
|
158
|
-
persistence.storageStatus = Ready(
|
|
159
|
-
cleanRun: false,
|
|
160
|
-
cache: await loadInitialCache(persistence, ~withUpload=false),
|
|
161
|
-
})
|
|
144
|
+
persistence.storageStatus = Ready(await persistence.storage.loadInitialState())
|
|
162
145
|
}
|
|
163
146
|
resolveRef.contents()
|
|
164
147
|
}
|
|
@@ -178,6 +161,15 @@ let getInitializedStorageOrThrow = persistence => {
|
|
|
178
161
|
}
|
|
179
162
|
}
|
|
180
163
|
|
|
164
|
+
let getInitializedState = persistence => {
|
|
165
|
+
switch persistence.storageStatus {
|
|
166
|
+
| Unknown
|
|
167
|
+
| Initializing(_) =>
|
|
168
|
+
Js.Exn.raiseError(`Failed to access the initial state. The Persistence layer is not initialized.`)
|
|
169
|
+
| Ready(initialState) => initialState
|
|
170
|
+
}
|
|
171
|
+
}
|
|
172
|
+
|
|
181
173
|
let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items) => {
|
|
182
174
|
switch persistence.storageStatus {
|
|
183
175
|
| Unknown
|
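
Taken together: initialize now returns the initial state directly, loadInitialState covers the already-initialized path, and the awkward restoreEffectCache(~withUpload) round-trip is gone. In TypeScript terms the reshaped storage contract looks roughly like this (a sketch; element types collapsed to unknown):

    // Sketch of the reshaped Persistence storage contract (simplified).
    interface EffectCacheRecord { effectName: string; count: number }

    interface InitialState {
      cleanRun: boolean;
      cache: Record<string, EffectCacheRecord>;
      chains: unknown[]; // InternalTable.Chains.t rows
    }

    interface Storage {
      isInitialized(): Promise<boolean>;
      // Creates the schema, tables, enums, etc. and reports the resulting
      // initial state in one call.
      initialize(opts: {
        chainConfigs?: unknown[]; // InternalConfig.chain values
        entities?: unknown[];
        enums?: unknown[];
      }): Promise<InitialState>;
      // Fast path used when the storage already exists.
      loadInitialState(): Promise<InitialState>;
    }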
package/src/Persistence.res.js
CHANGED
|
@@ -6,6 +6,7 @@ var Logging = require("./Logging.res.js");
|
|
|
6
6
|
var Prometheus = require("./Prometheus.res.js");
|
|
7
7
|
var EntityHistory = require("./db/EntityHistory.res.js");
|
|
8
8
|
var ErrorHandling = require("./ErrorHandling.res.js");
|
|
9
|
+
var InternalTable = require("./db/InternalTable.res.js");
|
|
9
10
|
var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
|
|
10
11
|
var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
|
|
11
12
|
|
|
@@ -24,12 +25,11 @@ var entityHistoryActionEnumConfig = {
|
|
|
24
25
|
default: "SET"
|
|
25
26
|
};
|
|
26
27
|
|
|
27
|
-
function make(userEntities,
|
|
28
|
-
var allEntities = userEntities.concat([
|
|
28
|
+
function make(userEntities, allEnums, storage) {
|
|
29
|
+
var allEntities = userEntities.concat([InternalTable.DynamicContractRegistry.config]);
|
|
29
30
|
var allEnums$1 = allEnums.concat([entityHistoryActionEnumConfig]);
|
|
30
31
|
return {
|
|
31
32
|
userEntities: userEntities,
|
|
32
|
-
staticTables: staticTables,
|
|
33
33
|
allEntities: allEntities,
|
|
34
34
|
allEnums: allEnums$1,
|
|
35
35
|
storageStatus: "Unknown",
|
|
@@ -37,17 +37,7 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, stor
|
|
|
37
37
|
};
|
|
38
38
|
}
|
|
39
39
|
|
|
40
|
-
async function
|
|
41
|
-
var effectCacheRecords = await persistence.storage.restoreEffectCache(withUpload);
|
|
42
|
-
var cache = {};
|
|
43
|
-
effectCacheRecords.forEach(function (record) {
|
|
44
|
-
Prometheus.EffectCacheCount.set(record.count, record.effectName);
|
|
45
|
-
cache[record.effectName] = record;
|
|
46
|
-
});
|
|
47
|
-
return cache;
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
async function init(persistence, resetOpt) {
|
|
40
|
+
async function init(persistence, chainConfigs, resetOpt) {
|
|
51
41
|
var reset = resetOpt !== undefined ? resetOpt : false;
|
|
52
42
|
try {
|
|
53
43
|
var promise = persistence.storageStatus;
|
|
@@ -75,12 +65,11 @@ async function init(persistence, resetOpt) {
|
|
|
75
65
|
};
|
|
76
66
|
if (reset || !await persistence.storage.isInitialized()) {
|
|
77
67
|
Logging.info("Initializing the indexer storage...");
|
|
78
|
-
await persistence.storage.initialize(
|
|
68
|
+
var initialState = await persistence.storage.initialize(chainConfigs, persistence.allEntities, persistence.allEnums);
|
|
79
69
|
Logging.info("The indexer storage is ready. Uploading cache...");
|
|
80
70
|
persistence.storageStatus = {
|
|
81
71
|
TAG: "Ready",
|
|
82
|
-
|
|
83
|
-
cache: await loadInitialCache(persistence, true)
|
|
72
|
+
_0: initialState
|
|
84
73
|
};
|
|
85
74
|
} else {
|
|
86
75
|
var match = persistence.storageStatus;
|
|
@@ -90,8 +79,7 @@ async function init(persistence, resetOpt) {
|
|
|
90
79
|
Logging.info("The indexer storage is ready.");
|
|
91
80
|
persistence.storageStatus = {
|
|
92
81
|
TAG: "Ready",
|
|
93
|
-
|
|
94
|
-
cache: await loadInitialCache(persistence, false)
|
|
82
|
+
_0: await persistence.storage.loadInitialState()
|
|
95
83
|
};
|
|
96
84
|
}
|
|
97
85
|
|
|
@@ -113,6 +101,15 @@ function getInitializedStorageOrThrow(persistence) {
|
|
|
113
101
|
}
|
|
114
102
|
}
|
|
115
103
|
|
|
104
|
+
function getInitializedState(persistence) {
|
|
105
|
+
var initialState = persistence.storageStatus;
|
|
106
|
+
if (typeof initialState !== "object" || initialState.TAG === "Initializing") {
|
|
107
|
+
return Js_exn.raiseError("Failed to access the initial state. The Persistence layer is not initialized.");
|
|
108
|
+
} else {
|
|
109
|
+
return initialState._0;
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
|
|
116
113
|
async function setEffectCacheOrThrow(persistence, effect, items) {
|
|
117
114
|
var match = persistence.storageStatus;
|
|
118
115
|
if (typeof match !== "object") {
|
|
@@ -121,7 +118,7 @@ async function setEffectCacheOrThrow(persistence, effect, items) {
|
|
|
121
118
|
if (match.TAG === "Initializing") {
|
|
122
119
|
return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
|
|
123
120
|
}
|
|
124
|
-
var cache = match.cache;
|
|
121
|
+
var cache = match._0.cache;
|
|
125
122
|
var storage = persistence.storage;
|
|
126
123
|
var effectName = effect.name;
|
|
127
124
|
var c = cache[effectName];
|
|
@@ -147,5 +144,6 @@ exports.entityHistoryActionEnumConfig = entityHistoryActionEnumConfig;
|
|
|
147
144
|
exports.make = make;
|
|
148
145
|
exports.init = init;
|
|
149
146
|
exports.getInitializedStorageOrThrow = getInitializedStorageOrThrow;
|
|
147
|
+
exports.getInitializedState = getInitializedState;
|
|
150
148
|
exports.setEffectCacheOrThrow = setEffectCacheOrThrow;
|
|
151
149
|
/* Logging Not a pure module */
|