envio 2.27.6 → 2.28.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/rescript.json +3 -0
- package/src/Hasura.res +135 -12
- package/src/Hasura.res.js +95 -13
- package/src/Internal.res +7 -4
- package/src/InternalConfig.res +20 -0
- package/src/InternalConfig.res.js +2 -0
- package/src/Js.shim.ts +11 -0
- package/src/LoadManager.res +12 -6
- package/src/LoadManager.res.js +13 -6
- package/src/Persistence.res +25 -33
- package/src/Persistence.res.js +18 -20
- package/src/PgStorage.res +155 -101
- package/src/PgStorage.res.js +141 -100
- package/src/Prometheus.res +2 -2
- package/src/Prometheus.res.js +2 -3
- package/src/bindings/Pino.res +1 -1
- package/src/bindings/Pino.res.js +2 -1
- package/src/db/EntityHistory.res +18 -17
- package/src/db/EntityHistory.res.js +28 -26
- package/src/db/InternalTable.gen.ts +43 -0
- package/src/db/InternalTable.res +392 -0
- package/src/db/InternalTable.res.js +295 -0
- package/src/vendored/Rest.res +11 -2
- package/src/vendored/Rest.res.js +44 -35
package/src/Persistence.res.js
CHANGED
@@ -6,6 +6,7 @@ var Logging = require("./Logging.res.js");
 var Prometheus = require("./Prometheus.res.js");
 var EntityHistory = require("./db/EntityHistory.res.js");
 var ErrorHandling = require("./ErrorHandling.res.js");
+var InternalTable = require("./db/InternalTable.res.js");
 var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
@@ -24,12 +25,11 @@ var entityHistoryActionEnumConfig = {
   default: "SET"
 };
 
-function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage) {
-  var allEntities = userEntities.concat([dcRegistryEntityConfig]);
+function make(userEntities, allEnums, storage) {
+  var allEntities = userEntities.concat([InternalTable.DynamicContractRegistry.config]);
   var allEnums$1 = allEnums.concat([entityHistoryActionEnumConfig]);
   return {
     userEntities: userEntities,
-    staticTables: staticTables,
     allEntities: allEntities,
     allEnums: allEnums$1,
     storageStatus: "Unknown",
@@ -37,17 +37,7 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage) {
   };
 }
 
-async function loadInitialCache(persistence, withUpload) {
-  var effectCacheRecords = await persistence.storage.restoreEffectCache(withUpload);
-  var cache = {};
-  effectCacheRecords.forEach(function (record) {
-    Prometheus.EffectCacheCount.set(record.count, record.effectName);
-    cache[record.effectName] = record;
-  });
-  return cache;
-}
-
-async function init(persistence, resetOpt) {
+async function init(persistence, chainConfigs, resetOpt) {
   var reset = resetOpt !== undefined ? resetOpt : false;
   try {
     var promise = persistence.storageStatus;
@@ -75,12 +65,11 @@ async function init(persistence, resetOpt) {
   };
   if (reset || !await persistence.storage.isInitialized()) {
     Logging.info("Initializing the indexer storage...");
-    await persistence.storage.initialize(
+    var initialState = await persistence.storage.initialize(chainConfigs, persistence.allEntities, persistence.allEnums);
     Logging.info("The indexer storage is ready. Uploading cache...");
     persistence.storageStatus = {
       TAG: "Ready",
-
-      cache: await loadInitialCache(persistence, true)
+      _0: initialState
     };
   } else {
     var match = persistence.storageStatus;
@@ -90,8 +79,7 @@ async function init(persistence, resetOpt) {
     Logging.info("The indexer storage is ready.");
     persistence.storageStatus = {
       TAG: "Ready",
-
-      cache: await loadInitialCache(persistence, false)
+      _0: await persistence.storage.loadInitialState()
     };
   }
 
@@ -113,6 +101,15 @@ function getInitializedStorageOrThrow(persistence) {
   }
 }
 
+function getInitializedState(persistence) {
+  var initialState = persistence.storageStatus;
+  if (typeof initialState !== "object" || initialState.TAG === "Initializing") {
+    return Js_exn.raiseError("Failed to access the initial state. The Persistence layer is not initialized.");
+  } else {
+    return initialState._0;
+  }
+}
+
 async function setEffectCacheOrThrow(persistence, effect, items) {
   var match = persistence.storageStatus;
   if (typeof match !== "object") {
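Note: the Ready variant of storageStatus now carries a payload in _0, set by init above and read back by the new getInitializedState accessor. A rough JavaScript sketch of the lifecycle, not package code; the record contents follow Persistence.initialState as constructed in PgStorage.res below, and Persistence here stands for require("./Persistence.res.js"):

    // Sketch: how persistence.storageStatus evolves across init().
    var persistence = Persistence.make(userEntities, allEnums, storage);
    // persistence.storageStatus === "Unknown"

    await Persistence.init(persistence, chainConfigs);
    // persistence.storageStatus is now:
    // {
    //   TAG: "Ready",
    //   _0: {              // Persistence.initialState
    //     cleanRun: true,  // false when restored via storage.loadInitialState()
    //     cache: {},       // effectName -> {effectName, count}
    //     chains: []       // one record per configured chain
    //   }
    // }

    var state = Persistence.getInitializedState(persistence);
    // Returns storageStatus._0, or throws while "Unknown"/"Initializing".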
@@ -121,7 +118,7 @@ async function setEffectCacheOrThrow(persistence, effect, items) {
   if (match.TAG === "Initializing") {
     return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
   }
-  var cache = match.cache;
+  var cache = match._0.cache;
   var storage = persistence.storage;
   var effectName = effect.name;
   var c = cache[effectName];
@@ -147,5 +144,6 @@ exports.entityHistoryActionEnumConfig = entityHistoryActionEnumConfig;
 exports.make = make;
 exports.init = init;
 exports.getInitializedStorageOrThrow = getInitializedStorageOrThrow;
+exports.getInitializedState = getInitializedState;
 exports.setEffectCacheOrThrow = setEffectCacheOrThrow;
 /* Logging Not a pure module */
package/src/PgStorage.res
CHANGED
@@ -57,11 +57,19 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
 let makeInitializeTransaction = (
   ~pgSchema,
   ~pgUser,
-  ~generalTables,
+  ~chainConfigs=[],
   ~entities=[],
   ~enums=[],
   ~isEmptyPgSchema=false,
 ) => {
+  let generalTables = [
+    InternalTable.EventSyncState.table,
+    InternalTable.Chains.table,
+    InternalTable.PersistedState.table,
+    InternalTable.EndOfBlockRangeScannedData.table,
+    InternalTable.RawEvents.table,
+  ]
+
   let allTables = generalTables->Array.copy
   let allEntityTables = []
   entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
@@ -113,7 +121,8 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
 
   // Add derived indices
   entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
-    functionsQuery :=
+    functionsQuery :=
+      functionsQuery.contents ++ "\n" ++ entity.entityHistory.makeInsertFnQuery(~pgSchema)
 
     entity.table
     ->Table.getDerivedFromFields
@@ -131,6 +140,12 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
     })
   })
 
+  // Populate initial chain data
+  switch InternalTable.Chains.makeInitialValuesQuery(~pgSchema, ~chainConfigs) {
+  | Some(initialChainsValuesQuery) => query := query.contents ++ "\n" ++ initialChainsValuesQuery
+  | None => ()
+  }
+
   // Add cache row count function
   functionsQuery :=
     functionsQuery.contents ++
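The initialization transaction now also seeds the chains table. A minimal JavaScript sketch of the append pattern used above, assuming makeInitialValuesQuery returns a string or null as this sketch's stand-in for the ReScript option:

    // Sketch: conditionally append the initial-values statement to the
    // transaction script accumulated in `query`.
    var initialChainsValuesQuery = makeInitialValuesQuery(pgSchema, chainConfigs);
    if (initialChainsValuesQuery != null) {
      query = query + "\n" + initialChainsValuesQuery;
    }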
@@ -162,6 +177,10 @@ let makeLoadByIdsQuery = (~pgSchema, ~tableName) => {
   `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
 }
 
+let makeLoadAllQuery = (~pgSchema, ~tableName) => {
+  `SELECT * FROM "${pgSchema}"."${tableName}";`
+}
+
 let makeInsertUnnestSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
   let {quotedFieldNames, quotedNonPrimaryFieldNames, arrayFieldTypes} =
     table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
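makeLoadAllQuery is the unfiltered counterpart of makeLoadByIdsQuery; it is used further down to read the whole chains table back on restart. A JavaScript mirror (sketch; the schema and table names below are hypothetical):

    var makeLoadAllQuery = function (pgSchema, tableName) {
      return 'SELECT * FROM "' + pgSchema + '"."' + tableName + '";';
    };

    makeLoadAllQuery("public", "chains");
    // => SELECT * FROM "public"."chains";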
@@ -234,21 +253,19 @@ VALUES${placeholders.contents}` ++
   } ++ ";"
 }
 
-// Should move this to a better place
-// We need it for the isRawEvents check in makeTableBatchSet
-// to always apply the unnest optimization.
-// This is needed, because even though it has JSON fields,
-// they are always guaranteed to be an object.
-// FIXME what about Fuel params?
-let rawEventsTableName = "raw_events"
-let eventSyncStateTableName = "event_sync_state"
-
 // Constants for chunking
 let maxItemsPerQuery = 500
 
 let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
   let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
 
+  // Should move this to a better place
+  // We need it for the isRawEvents check in makeTableBatchSet
+  // to always apply the unnest optimization.
+  // This is needed, because even though it has JSON fields,
+  // they are always guaranteed to be an object.
+  // FIXME what about Fuel params?
+  let isRawEvents = table.tableName === InternalTable.RawEvents.table.tableName
 
   // Should experiment how much it'll affect performance
   // Although, it should be fine not to perform the validation check,
@@ -401,8 +418,7 @@ let setEntityHistoryOrThrow = (
   ~shouldCopyCurrentEntity=?,
   ~shouldRemoveInvalidUtf8=false,
 ) => {
-  rows
-  ->Belt.Array.map(historyRow => {
+  rows->Belt.Array.map(historyRow => {
     let row = historyRow->S.reverseConvertToJsonOrThrow(entityHistory.schema)
     if shouldRemoveInvalidUtf8 {
       [row]->removeInvalidUtf8InPlace
@@ -418,10 +434,19 @@ let setEntityHistoryOrThrow = (
         !containsRollbackDiffChange
       }
     },
-    )
+    )->Promise.catch(exn => {
+      let reason = exn->Utils.prettifyExn
+      let detail = %raw(`reason?.detail || ""`)
+      raise(
+        Persistence.StorageError({
+          message: `Failed to insert history item into table "${entityHistory.table.tableName}".${detail !== ""
+              ? ` Details: ${detail}`
+              : ""}`,
+          reason,
+        }),
+      )
+    })
   })
-  ->Promise.all
-  ->(Utils.magic: promise<array<unit>> => promise<unit>)
 }
 
 type schemaTableName = {
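setEntityHistoryOrThrow now rejects with a Persistence.StorageError naming the history table and, when the driver provides one, the Postgres detail field, instead of leaking the raw driver error. A hedged JavaScript sketch of the same wrap-and-rethrow pattern (names are illustrative, not the package API):

    async function insertHistoryOrThrow(insertPromise, tableName) {
      try {
        return await insertPromise;
      } catch (reason) {
        // Postgres errors often carry a machine-readable `detail` field.
        var detail = (reason && reason.detail) || "";
        throw new Error(
          'Failed to insert history item into table "' + tableName + '".' +
            (detail !== "" ? " Details: " + detail : ""),
          { cause: reason },
        );
      }
    }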
@@ -539,12 +564,95 @@ let make = (
   let isInitialized = async () => {
     let envioTables =
       await sql->Postgres.unsafe(
-        `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${
+        `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${InternalTable.EventSyncState.table.tableName}' OR table_name = '${InternalTable.Chains.table.tableName}';`,
       )
     envioTables->Utils.Array.notEmpty
   }
 
-  let initialize = async (~generalTables=[], ~entities=[], ~enums=[]) => {
+  let restoreEffectCache = async (~withUpload) => {
+    if withUpload {
+      // Try to restore cache tables from binary files
+      let nothingToUploadErrorMessage = "Nothing to upload."
+
+      switch await Promise.all2((
+        NodeJs.Fs.Promises.readdir(cacheDirPath)
+        ->Promise.thenResolve(e => Ok(e))
+        ->Promise.catch(_ => Promise.resolve(Error(nothingToUploadErrorMessage))),
+        getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort),
+      )) {
+      | (Ok(entries), Ok(psqlExec)) => {
+          let cacheFiles = entries->Js.Array2.filter(entry => {
+            entry->Js.String2.endsWith(".tsv")
+          })
+
+          let _ =
+            await cacheFiles
+            ->Js.Array2.map(entry => {
+              let effectName = entry->Js.String2.slice(~from=0, ~to_=-4) // Remove .tsv extension
+              let table = Internal.makeCacheTable(~effectName)
+
+              sql
+              ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
+              ->Promise.then(() => {
+                let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
+
+                let command = `${psqlExec} -c 'COPY "${pgSchema}"."${table.tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
+
+                Promise.make(
+                  (resolve, reject) => {
+                    NodeJs.ChildProcess.execWithOptions(
+                      command,
+                      psqlExecOptions,
+                      (~error, ~stdout, ~stderr as _) => {
+                        switch error {
+                        | Value(error) => reject(error)
+                        | Null => resolve(stdout)
+                        }
+                      },
+                    )
+                  },
+                )
+              })
+            })
+            ->Promise.all
+
+          Logging.info("Successfully uploaded cache.")
+        }
+      | (Error(message), _)
+      | (_, Error(message)) =>
+        if message === nothingToUploadErrorMessage {
+          Logging.info("No cache found to upload.")
+        } else {
+          Logging.error(`Failed to upload cache, continuing without it. ${message}`)
+        }
+      }
+    }
+
+    let cacheTableInfo: array<schemaCacheTableInfo> =
+      await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema))
+
+    if withUpload && cacheTableInfo->Utils.Array.notEmpty {
+      // Integration with other tools like Hasura
+      switch onNewTables {
+      | Some(onNewTables) =>
+        await onNewTables(
+          ~tableNames=cacheTableInfo->Js.Array2.map(info => {
+            info.tableName
+          }),
+        )
+      | None => ()
+      }
+    }
+
+    let cache = Js.Dict.empty()
+    cacheTableInfo->Js.Array2.forEach(({tableName, count}) => {
+      let effectName = tableName->Js.String2.sliceToEnd(~from=cacheTablePrefixLength)
+      cache->Js.Dict.set(effectName, ({effectName, count}: Persistence.effectCacheRecord))
+    })
+    cache
+  }
+
+  let initialize = async (~chainConfigs=[], ~entities=[], ~enums=[]): Persistence.initialState => {
     let schemaTableNames: array<schemaTableName> =
       await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
 
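restoreEffectCache moves ahead of initialize so both initialize and the new loadInitialState can share it. Its upload path shells out to psql, COPYing each .tsv dump into a freshly created cache table. The command template is taken from the diff; the concrete values below are invented for illustration:

    // Illustrative values only; the real ones come from getConnectedPsqlExec,
    // the configured schema, and the cache directory listing.
    var psqlExec = "psql postgresql://user@localhost:5432/envio"; // hypothetical
    var pgSchema = "public";
    var tableName = "envio_effect_getMetadata"; // hypothetical cache table
    var inputFile = "cache/getMetadata.tsv"; // hypothetical dump file

    var command =
      psqlExec +
      " -c 'COPY \"" + pgSchema + "\".\"" + tableName +
      "\" FROM STDIN WITH (FORMAT text, HEADER);' < " + inputFile;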
@@ -557,7 +665,11 @@ let make = (
       schemaTableNames->Utils.Array.notEmpty &&
         // Otherwise should throw if there's a table, but no envio specific one
         // This means that the schema is used for something else than envio.
-        !(schemaTableNames->Js.Array2.some(table => table.tableName === eventSyncStateTableName))
+        !(
+          schemaTableNames->Js.Array2.some(table =>
+            table.tableName === InternalTable.EventSyncState.table.tableName
+          )
+        )
     ) {
       Js.Exn.raiseError(
         `Cannot run Envio migrations on PostgreSQL schema "${pgSchema}" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: "pnpm envio local db-migrate down"\n2. Or specify a different schema name by setting the "ENVIO_PG_PUBLIC_SCHEMA" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.`,
@@ -567,9 +679,9 @@ let make = (
     let queries = makeInitializeTransaction(
       ~pgSchema,
       ~pgUser,
-      ~generalTables,
       ~entities,
       ~enums,
+      ~chainConfigs,
       ~isEmptyPgSchema=schemaTableNames->Utils.Array.isEmpty,
     )
     // Execute all queries within a single transaction for integrity
@@ -577,11 +689,19 @@ let make = (
       queries->Js.Array2.map(query => sql->Postgres.unsafe(query))
     })
 
+    let cache = await restoreEffectCache(~withUpload=true)
+
     // Integration with other tools like Hasura
     switch onInitialize {
     | Some(onInitialize) => await onInitialize()
     | None => ()
     }
+
+    {
+      cleanRun: true,
+      cache,
+      chains: chainConfigs->Js.Array2.map(InternalTable.Chains.initialFromConfig),
+    }
   }
 
   let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
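initialize now returns the Persistence.initialState record with cleanRun: true, while the loadInitialState added in the next hunk rebuilds the same record from an existing database with cleanRun: false. A rough JavaScript equivalent of that restart path (sketch; sql is the postgres.js client behind Postgres.unsafe, and chainsTableName stands for InternalTable.Chains.table.tableName):

    async function loadInitialState() {
      var results = await Promise.all([
        restoreEffectCache(false), // withUpload=false: skip the psql COPY step
        sql.unsafe(makeLoadAllQuery(pgSchema, chainsTableName)),
      ]);
      return {
        cleanRun: false,
        cache: results[0],
        chains: results[1],
      };
    }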
@@ -767,97 +887,31 @@ let make = (
     }
   }
 
-  let restoreEffectCache = async (~withUpload) => {
-    if withUpload {
-      // Try to restore cache tables from binary files
-      let nothingToUploadErrorMessage = "Nothing to upload."
-
-      switch await Promise.all2((
-        NodeJs.Fs.Promises.readdir(cacheDirPath)
-        ->Promise.thenResolve(e => Ok(e))
-        ->Promise.catch(_ => Promise.resolve(Error(nothingToUploadErrorMessage))),
-        getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort),
-      )) {
-      | (Ok(entries), Ok(psqlExec)) => {
-          let cacheFiles = entries->Js.Array2.filter(entry => {
-            entry->Js.String2.endsWith(".tsv")
-          })
-
-          let _ =
-            await cacheFiles
-            ->Js.Array2.map(entry => {
-              let effectName = entry->Js.String2.slice(~from=0, ~to_=-4) // Remove .tsv extension
-              let table = Internal.makeCacheTable(~effectName)
-
-              sql
-              ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
-              ->Promise.then(() => {
-                let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
-
-                let command = `${psqlExec} -c 'COPY "${pgSchema}"."${table.tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
-
-                Promise.make(
-                  (resolve, reject) => {
-                    NodeJs.ChildProcess.execWithOptions(
-                      command,
-                      psqlExecOptions,
-                      (~error, ~stdout, ~stderr as _) => {
-                        switch error {
-                        | Value(error) => reject(error)
-                        | Null => resolve(stdout)
-                        }
-                      },
-                    )
-                  },
-                )
-              })
-            })
-            ->Promise.all
-
-          Logging.info("Successfully uploaded cache.")
-        }
-      | (Error(message), _)
-      | (_, Error(message)) =>
-        if message === nothingToUploadErrorMessage {
-          Logging.info("No cache found to upload.")
-        } else {
-          Logging.error(`Failed to upload cache, continuing without it. ${message}`)
-        }
-      }
-    }
-
-    let cacheTableInfo: array<schemaCacheTableInfo> =
-      await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema))
+  let loadInitialState = async (): Persistence.initialState => {
+    let (cache, chains) = await Promise.all2((
+      restoreEffectCache(~withUpload=false),
+      sql
+      ->Postgres.unsafe(
+        makeLoadAllQuery(~pgSchema, ~tableName=InternalTable.Chains.table.tableName),
+      )
+      ->(Utils.magic: promise<array<unknown>> => promise<array<InternalTable.Chains.t>>),
+    ))
 
-    if withUpload && cacheTableInfo->Utils.Array.notEmpty {
-      // Integration with other tools like Hasura
-      switch onNewTables {
-      | Some(onNewTables) =>
-        await onNewTables(
-          ~tableNames=cacheTableInfo->Js.Array2.map(info => {
-            info.tableName
-          }),
-        )
-      | None => ()
-      }
+    {
+      cleanRun: false,
+      cache,
+      chains,
     }
-
-    cacheTableInfo->Js.Array2.map((info): Persistence.effectCacheRecord => {
-      {
-        effectName: info.tableName->Js.String2.sliceToEnd(~from=cacheTablePrefixLength),
-        count: info.count,
-      }
-    })
   }
 
   {
     isInitialized,
     initialize,
+    loadInitialState,
     loadByFieldOrThrow,
     loadByIdsOrThrow,
     setOrThrow,
     setEffectCacheOrThrow,
     dumpEffectCache,
-    restoreEffectCache,
   }
 }