envio 2.26.0-alpha.5 → 2.26.0-alpha.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +2 -21
- package/index.js +1 -0
- package/package.json +5 -5
- package/src/Envio.res +10 -1
- package/src/Envio.res.js +7 -1
- package/src/Internal.res +1 -0
- package/src/Persistence.res +2 -2
- package/src/Persistence.res.js +2 -2
- package/src/PgStorage.res +99 -70
- package/src/PgStorage.res.js +120 -80
package/index.d.ts
CHANGED
@@ -35,26 +35,6 @@ type UnknownToOutput<T> = T extends Sury.Schema<unknown>
     >
   : T;
 
-type UnknownToInput<T> = T extends Sury.Schema<unknown>
-  ? Sury.Input<T>
-  : T extends (...args: any[]) => any
-  ? T
-  : T extends unknown[]
-  ? { [K in keyof T]: UnknownToInput<T[K]> }
-  : T extends { [k in keyof T]: unknown }
-  ? Flatten<
-      {
-        [k in keyof T as HasUndefined<UnknownToInput<T[k]>> extends true
-          ? k
-          : never]?: UnknownToInput<T[k]>;
-      } & {
-        [k in keyof T as HasUndefined<UnknownToInput<T[k]>> extends true
-          ? never
-          : k]: UnknownToInput<T[k]>;
-      }
-    >
-  : T;
-
 type HasUndefined<T> = [T] extends [undefined]
   ? true
   : undefined extends T
@@ -92,7 +72,7 @@ type Flatten<T> = T extends object
 export function experimental_createEffect<
   IS,
   OS,
-  I = UnknownToInput<IS>,
+  I = UnknownToOutput<IS>,
   O = UnknownToOutput<OS>,
   // A hack to enforce that the inferred return type
   // matches the output schema type
@@ -130,6 +110,7 @@ export declare namespace S {
   // Don't expose recursive for now, since it's too advanced
   // export const recursive: typeof Sury.recursive;
   export const transform: typeof Sury.transform;
+  export const shape: typeof Sury.shape;
   export const refine: typeof Sury.refine;
   export const schema: typeof Sury.schema;
   export const record: typeof Sury.record;
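With `UnknownToInput` gone, both `I` and `O` now default through the same `UnknownToOutput` mapping, so input shapes resolve to plain value types the same way output shapes do. A self-contained TypeScript analogue of the surviving helper (the `Schema` and `SchemaOutput` stand-ins below are simplified, not the real Sury definitions):

// Simplified stand-ins for Sury.Schema and Sury.Output; illustrative only.
type Schema<T> = { parse: (value: unknown) => T };
type SchemaOutput<S> = S extends Schema<infer T> ? T : never;

// Analogue of UnknownToOutput: schemas resolve to their output type,
// functions and scalars pass through, containers map recursively.
type UnknownToOutput<T> = T extends Schema<unknown>
  ? SchemaOutput<T>
  : T extends (...args: any[]) => any
  ? T
  : T extends unknown[]
  ? { [K in keyof T]: UnknownToOutput<T[K]> }
  : T extends { [k in keyof T]: unknown }
  ? { [K in keyof T]: UnknownToOutput<T[K]> }
  : T;

// With `I = UnknownToOutput<IS>`, this input shape resolves to
// { address: string; chainId: number }, the same rule as for outputs.
type ResolvedInput = UnknownToOutput<{ address: Schema<string>; chainId: number }>;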
package/index.js
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.26.0-alpha.5",
+  "version": "v2.26.0-alpha.7",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.26.0-alpha.5",
-    "envio-linux-arm64": "v2.26.0-alpha.5",
-    "envio-darwin-x64": "v2.26.0-alpha.5",
-    "envio-darwin-arm64": "v2.26.0-alpha.5"
+    "envio-linux-x64": "v2.26.0-alpha.7",
+    "envio-linux-arm64": "v2.26.0-alpha.7",
+    "envio-darwin-x64": "v2.26.0-alpha.7",
+    "envio-darwin-arm64": "v2.26.0-alpha.7"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/Envio.res
CHANGED
@@ -50,7 +50,16 @@ let experimental_createEffect = (
     >
   ),
   callsCount: 0,
-
+  // This is the way to make the createEffect API
+  // work without the need for users to call S.schema themselves,
+  // but simply pass the desired object/tuple/etc.
+  // If they pass a schem, it'll also work.
+  input: S.schema(_ => options.input)->(
+    Utils.magic: S.t<S.t<'input>> => S.t<Internal.effectInput>
+  ),
+  output: S.schema(_ => options.output)->(
+    Utils.magic: S.t<S.t<'output>> => S.t<Internal.effectOutput>
+  ),
   cache: options.cache->Belt.Option.getWithDefault(false),
 }->(Utils.magic: Internal.effect => effect<'input, 'output>)
 }
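The new comment block states the intent: `S.schema(_ => options.input)` wraps whatever the user supplied, so a literal object/tuple and a ready-made schema both become valid inputs. A hypothetical usage sketch in TypeScript (the effect name, fields, and handler-argument shape are invented for illustration, and `S.string`/`S.number` are assumed to be among the `S` namespace re-exports):

import { experimental_createEffect, S } from "envio";

// Hypothetical effect: `input` and `output` are plain shapes; S.schema
// is applied internally, and passing a prebuilt schema still works.
export const getTokenMetadata = experimental_createEffect(
  {
    name: "getTokenMetadata",
    input: { address: S.string, chainId: S.number },
    output: { name: S.string, decimals: S.number },
    cache: true,
  },
  async ({ input }) => {
    // ...look up metadata for input.address on chain input.chainId...
    return { name: "Example Token", decimals: 18 };
  },
);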
package/src/Envio.res.js
CHANGED
@@ -3,6 +3,7 @@
 
 var Prometheus = require("./Prometheus.res.js");
 var Belt_Option = require("rescript/lib/js/belt_Option.js");
+var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 
 function experimental_createEffect(options, handler) {
   Prometheus.EffectCallsCount.set(0, options.name);
@@ -10,7 +11,12 @@ function experimental_createEffect(options, handler) {
     name: options.name,
     handler: handler,
     cache: Belt_Option.getWithDefault(options.cache, false),
-    output:
+    output: S$RescriptSchema.schema(function (param) {
+        return options.output;
+      }),
+    input: S$RescriptSchema.schema(function (param) {
+        return options.input;
+      }),
     callsCount: 0
   };
 }
package/src/Internal.res
CHANGED
package/src/Persistence.res
CHANGED
@@ -139,7 +139,7 @@ let init = {
         ~enums=persistence.allEnums,
       )
 
-      Logging.info(`The indexer storage is ready.
+      Logging.info(`The indexer storage is ready. Uploading cache...`)
       persistence.storageStatus = Ready({
         cleanRun: true,
         cache: await loadInitialCache(persistence, ~withUpload=true),
@@ -152,7 +152,7 @@ let init = {
         | _ => false
         }
       ) {
-      Logging.info(`The indexer storage is
+      Logging.info(`The indexer storage is ready.`)
       persistence.storageStatus = Ready({
         cleanRun: false,
         cache: await loadInitialCache(persistence, ~withUpload=false),
package/src/Persistence.res.js
CHANGED
@@ -76,7 +76,7 @@ async function init(persistence, resetOpt) {
   if (reset || !await persistence.storage.isInitialized()) {
     Logging.info("Initializing the indexer storage...");
     await persistence.storage.initialize(persistence.allEntities, persistence.staticTables, persistence.allEnums);
-    Logging.info("The indexer storage is ready.
+    Logging.info("The indexer storage is ready. Uploading cache...");
     persistence.storageStatus = {
       TAG: "Ready",
       cleanRun: true,
@@ -87,7 +87,7 @@ async function init(persistence, resetOpt) {
   var tmp;
   tmp = typeof match !== "object" || match.TAG !== "Initializing" ? false : true;
   if (tmp) {
-    Logging.info("The indexer storage is
+    Logging.info("The indexer storage is ready.");
     persistence.storageStatus = {
       TAG: "Ready",
       cleanRun: false,
package/src/PgStorage.res
CHANGED
@@ -387,7 +387,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
     raise(
       Persistence.StorageError({
         message: `Failed to insert items into table "${table.tableName}"`,
-        reason: exn,
+        reason: exn->Internal.prettifyExn,
       }),
     )
   }
@@ -455,14 +455,19 @@ let makeSchemaCacheTableInfoQuery = (~pgSchema) => {
 type psqlExecState =
   Unknown | Pending(promise<result<string, string>>) | Resolved(result<string, string>)
 
-let getPsqlExec = {
+let getConnectedPsqlExec = {
+  let pgDockerServiceName = "envio-postgres"
+  // Should use the default port, since we're executing the command
+  // from the postgres container's network.
+  let pgDockerServicePort = 5432
+
   // For development: We run the indexer process locally,
   // and there might not be psql installed on the user's machine.
   // So we use docker-compose to run psql existing in the postgres container.
   // For production: We expect indexer to be running in a container,
   // with psql installed. So we can call it directly.
   let psqlExecState = ref(Unknown)
-  async (~pgUser, ~pgHost) => {
+  async (~pgUser, ~pgHost, ~pgDatabase, ~pgPort) => {
     switch psqlExecState.contents {
     | Unknown => {
         let promise = Promise.make((resolve, _reject) => {
@@ -470,18 +475,31 @@ let getPsqlExec = {
           NodeJs.ChildProcess.exec(`${binary} --version`, (~error, ~stdout as _, ~stderr as _) => {
             switch error {
             | Value(_) => {
-                let binary = `docker-compose exec -T -u ${pgUser} ${
+                let binary = `docker-compose exec -T -u ${pgUser} ${pgDockerServiceName} psql`
                 NodeJs.ChildProcess.exec(
                   `${binary} --version`,
                   (~error, ~stdout as _, ~stderr as _) => {
                     switch error {
-                    | Value(_) =>
-
+                    | Value(_) =>
+                      resolve(
+                        Error(`Please check if "psql" binary is installed or docker-compose is running for the local indexer.`),
+                      )
+                    | Null =>
+                      resolve(
+                        Ok(
+                          `${binary} -h ${pgHost} -p ${pgDockerServicePort->Js.Int.toString} -U ${pgUser} -d ${pgDatabase}`,
+                        ),
+                      )
                     }
                   },
                 )
               }
-            | Null =>
+            | Null =>
+              resolve(
+                Ok(
+                  `${binary} -h ${pgHost} -p ${pgPort->Js.Int.toString} -U ${pgUser} -d ${pgDatabase}`,
+                ),
+              )
             }
           })
         })
@@ -496,12 +514,12 @@ let getPsqlExec = {
     }
   }
 }
-let psqlExecMissingErrorMessage = `Please check if "psql" binary is installed or docker-compose is running for the local indexer.`
 
 let make = (
   ~sql: Postgres.sql,
   ~pgHost,
   ~pgSchema,
+  ~pgPort,
   ~pgUser,
   ~pgDatabase,
   ~pgPassword,
@@ -509,7 +527,7 @@ let make = (
   ~onNewTables=?,
 ): Persistence.storage => {
   let psqlExecOptions: NodeJs.ChildProcess.execOptions = {
-    env: Js.Dict.fromArray([("PGPASSWORD", pgPassword)]),
+    env: Js.Dict.fromArray([("PGPASSWORD", pgPassword), ("PATH", %raw(`process.env.PATH`))]),
   }
 
   let cacheDirPath = NodeJs.Path.resolve([
@@ -673,7 +691,7 @@ let make = (
       cacheTablePrefix ++ effectName,
       ~fields=[
        Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true),
-        Table.mkField("output", JsonB, ~fieldSchema=
+        Table.mkField("output", JsonB, ~fieldSchema=S.json(~validate=false)),
      ],
      ~compositeIndices=[],
    )
@@ -709,73 +727,79 @@ let make = (
   }
 
   let dumpEffectCache = async () => {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try {
+      let cacheTableInfo: array<schemaCacheTableInfo> =
+        (await sql
+        ->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema)))
+        ->Js.Array2.filter(i => i.count > 0)
+
+      if cacheTableInfo->Utils.Array.notEmpty {
+        // Create .envio/cache directory if it doesn't exist
+        try {
+          await NodeJs.Fs.Promises.access(cacheDirPath)
+        } catch {
+        | _ =>
+          // Create directory if it doesn't exist
+          await NodeJs.Fs.Promises.mkdir(~path=cacheDirPath, ~options={recursive: true})
+        }
 
-
-
+        // Command for testing. Run from generated
+        // docker-compose exec -T -u postgres envio-postgres psql -d envio-dev -c 'COPY "public"."envio_effect_getTokenMetadata" TO STDOUT (FORMAT text, HEADER);' > ../.envio/cache/getTokenMetadata.tsv
 
-
-
-
-
-
-
-
-
-
+        switch await getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort) {
+        | Ok(psqlExec) => {
+            Logging.info(
+              `Dumping cache: ${cacheTableInfo
+                ->Js.Array2.map(({tableName, count}) =>
+                  tableName ++ " (" ++ count->Belt.Int.toString ++ " rows)"
+                )
+                ->Js.Array2.joinWith(", ")}`,
+            )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            let promises = cacheTableInfo->Js.Array2.map(async ({tableName}) => {
+              let cacheName = tableName->Js.String2.sliceToEnd(~from=cacheTablePrefixLength)
+              let outputFile =
+                NodeJs.Path.join(cacheDirPath, cacheName ++ ".tsv")->NodeJs.Path.toString
+
+              let command = `${psqlExec} -c 'COPY "${pgSchema}"."${tableName}" TO STDOUT WITH (FORMAT text, HEADER);' > ${outputFile}`
+
+              Promise.make((resolve, reject) => {
+                NodeJs.ChildProcess.execWithOptions(
+                  command,
+                  psqlExecOptions,
+                  (~error, ~stdout, ~stderr as _) => {
+                    switch error {
+                    | Value(error) => reject(error)
+                    | Null => resolve(stdout)
+                    }
+                  },
+                )
+              })
+            })
             })
-            })
 
-
-
+            let _ = await promises->Promise.all
+            Logging.info(`Successfully dumped cache to ${cacheDirPath->NodeJs.Path.toString}`)
+          }
+        | Error(message) => Logging.error(`Failed to dump cache. ${message}`)
         }
-    | Error(_) => Logging.error(`Failed to dump cache. ${psqlExecMissingErrorMessage}`)
       }
+    } catch {
+    | exn => Logging.errorWithExn(exn->Internal.prettifyExn, `Failed to dump cache.`)
     }
   }
 
   let restoreEffectCache = async (~withUpload) => {
     if withUpload {
       // Try to restore cache tables from binary files
-      let
-
-
-
-
-
-
+      let nothingToUploadErrorMessage = "Nothing to upload."
+
+      switch await Promise.all2((
+        NodeJs.Fs.Promises.readdir(cacheDirPath)
+        ->Promise.thenResolve(e => Ok(e))
+        ->Promise.catch(_ => Promise.resolve(Error(nothingToUploadErrorMessage))),
+        getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort),
+      )) {
+      | (Ok(entries), Ok(psqlExec)) => {
          let cacheFiles = entries->Js.Array2.filter(entry => {
            entry->Js.String2.endsWith(".tsv")
          })
@@ -799,7 +823,7 @@ let make = (
         ->Promise.then(() => {
           let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
 
-          let command = `${psqlExec} -
+          let command = `${psqlExec} -c 'COPY "${pgSchema}"."${tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
 
           Promise.make(
             (resolve, reject) => {
@@ -818,18 +842,23 @@ let make = (
           })
         })
         ->Promise.all
+
+          Logging.info("Successfully uploaded cache.")
+        }
+      | (Error(message), _)
+      | (_, Error(message)) =>
+        if message === nothingToUploadErrorMessage {
+          Logging.info("No cache found to upload.")
+        } else {
+          Logging.error(`Failed to upload cache, continuing without it. ${message}`)
         }
-    | Error(_) =>
-      Logging.error(
-        `Failed to restore cache, continuing without it. ${psqlExecMissingErrorMessage}`,
-      )
       }
     }
 
     let cacheTableInfo: array<schemaCacheTableInfo> =
       await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema))
 
-    if withUpload {
+    if withUpload && cacheTableInfo->Utils.Array.notEmpty {
       switch onNewTables {
       | Some(onNewTables) =>
        await onNewTables(
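Two mechanisms in this rewrite are worth spelling out. The connected psql command is now resolved once: a local `psql` binary is preferred, with a fallback to psql inside the `envio-postgres` compose service, which must target the default port 5432 because the command runs on the container's network. Each cache table is then dumped via `COPY ... TO STDOUT` redirected into a `.tsv` file. A rough TypeScript sketch of the same pattern (the helper names are invented; only the command strings mirror the diff):

import { exec } from "node:child_process";

// Resolve a connected psql command: prefer a local binary, otherwise
// fall back to psql inside the compose service on the default port.
function resolvePsql(user: string, host: string, db: string, port: number): Promise<string> {
  return new Promise((resolve, reject) => {
    exec("psql --version", (localErr) => {
      if (localErr === null) {
        return resolve(`psql -h ${host} -p ${port} -U ${user} -d ${db}`);
      }
      const viaCompose = `docker-compose exec -T -u ${user} envio-postgres psql`;
      exec(`${viaCompose} --version`, (composeErr) => {
        if (composeErr === null) {
          resolve(`${viaCompose} -h ${host} -p 5432 -U ${user} -d ${db}`);
        } else {
          reject(new Error('Neither a local "psql" nor a running docker-compose was found.'));
        }
      });
    });
  });
}

// Dump one cache table to a .tsv file with the same COPY command the diff builds.
async function dumpTable(psql: string, schema: string, table: string, outputFile: string): Promise<void> {
  const command = `${psql} -c 'COPY "${schema}"."${table}" TO STDOUT WITH (FORMAT text, HEADER);' > ${outputFile}`;
  await new Promise<string>((resolve, reject) => {
    exec(command, { env: { PGPASSWORD: process.env.PGPASSWORD, PATH: process.env.PATH } }, (err, stdout) =>
      err === null ? resolve(stdout) : reject(err),
    );
  });
}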
package/src/PgStorage.res.js
CHANGED
@@ -10,6 +10,8 @@ var Js_exn = require("rescript/lib/js/js_exn.js");
 var Schema = require("./db/Schema.res.js");
 var Js_dict = require("rescript/lib/js/js_dict.js");
 var Logging = require("./Logging.res.js");
+var $$Promise = require("./bindings/Promise.res.js");
+var Internal = require("./Internal.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Caml_array = require("rescript/lib/js/caml_array.js");
 var Belt_Option = require("rescript/lib/js/belt_Option.js");
@@ -274,7 +276,7 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
     throw {
       RE_EXN_ID: Persistence.StorageError,
       message: "Failed to insert items into table \"" + table.tableName + "\"",
-      reason: exn,
+      reason: Internal.prettifyExn(exn),
       Error: new Error()
     };
   }
@@ -307,7 +309,7 @@ var psqlExecState = {
   contents: "Unknown"
 };
 
-async function getPsqlExec(pgUser, pgHost) {
+async function getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort) {
   var promise = psqlExecState.contents;
   if (typeof promise === "object") {
     if (promise.TAG === "Pending") {
@@ -322,20 +324,20 @@ async function getPsqlExec(pgUser, pgHost) {
       if (error === null) {
         return resolve({
           TAG: "Ok",
-          _0: binary
+          _0: binary + " -h " + pgHost + " -p " + pgPort.toString() + " -U " + pgUser + " -d " + pgDatabase
         });
       }
-      var binary$1 = "docker-compose exec -T -u " + pgUser + "
+      var binary$1 = "docker-compose exec -T -u " + pgUser + " envio-postgres psql";
       Child_process.exec(binary$1 + " --version", (function (error, param, param$1) {
         if (error === null) {
           return resolve({
             TAG: "Ok",
-            _0: binary$1
+            _0: binary$1 + " -h " + pgHost + " -p " + (5432).toString() + " -U " + pgUser + " -d " + pgDatabase
           });
         } else {
           return resolve({
             TAG: "Error",
-            _0: "
+            _0: "Please check if \"psql\" binary is installed or docker-compose is running for the local indexer."
           });
         }
       }));
@@ -353,13 +355,17 @@ async function getPsqlExec(pgUser, pgHost) {
   return result;
 }
 
-
-
-
-var psqlExecOptions_env = Js_dict.fromArray([[
+function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onInitialize, onNewTables) {
+  var psqlExecOptions_env = Js_dict.fromArray([
+    [
       "PGPASSWORD",
       pgPassword
-    ]
+    ],
+    [
+      "PATH",
+      process.env.PATH
+    ]
+  ]);
   var psqlExecOptions = {
     env: psqlExecOptions_env
   };
@@ -464,7 +470,7 @@ function make(sql, pgHost, pgSchema, pgUser, pgDatabase, pgPassword, onInitializ
   var setEffectCacheOrThrow = async function (effectName, ids, outputs, outputSchema, initialize) {
     var table = Table.mkTable(cacheTablePrefix + effectName, [], [
       Table.mkField("id", "TEXT", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
-      Table.mkField("output", "JSONB",
+      Table.mkField("output", "JSONB", S$RescriptSchema.json(false), undefined, undefined, undefined, undefined, undefined, undefined)
     ]);
     if (initialize) {
      await sql.unsafe(makeCreateTableQuery(table, pgSchema));
@@ -488,85 +494,120 @@ function make(sql, pgHost, pgSchema, pgUser, pgDatabase, pgPassword, onInitializ
     }));
   };
   var dumpEffectCache = async function () {
-    var cacheTableInfo = (await sql.unsafe(makeSchemaCacheTableInfoQuery(pgSchema))).filter(function (i) {
-      return i.count > 0;
-    });
-    if (!Utils.$$Array.notEmpty(cacheTableInfo)) {
-      return ;
-    }
     try {
-      await 
-
-
-
-
+      var cacheTableInfo = (await sql.unsafe(makeSchemaCacheTableInfoQuery(pgSchema))).filter(function (i) {
+        return i.count > 0;
+      });
+      if (!Utils.$$Array.notEmpty(cacheTableInfo)) {
+        return ;
+      }
+      try {
+        await Fs.promises.access(cacheDirPath);
+      }
+      catch (exn){
+        await Fs.promises.mkdir(cacheDirPath, {
+          recursive: true
+        });
+      }
+      var psqlExec = await getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort);
+      if (psqlExec.TAG !== "Ok") {
+        return Logging.error("Failed to dump cache. " + psqlExec._0);
+      }
+      var psqlExec$1 = psqlExec._0;
+      Logging.info("Dumping cache: " + cacheTableInfo.map(function (param) {
+        return param.table_name + " (" + String(param.count) + " rows)";
+      }).join(", "));
+      var promises = cacheTableInfo.map(async function (param) {
+        var tableName = param.table_name;
+        var cacheName = tableName.slice(cacheTablePrefixLength);
+        var outputFile = Path.join(cacheDirPath, cacheName + ".tsv");
+        var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + tableName + "\" TO STDOUT WITH (FORMAT text, HEADER);' > " + outputFile;
+        return new Promise((function (resolve, reject) {
+          Child_process.exec(command, psqlExecOptions, (function (error, stdout, param) {
+            if (error === null) {
+              return resolve(stdout);
+            } else {
+              return reject(error);
+            }
+          }));
+        }));
       });
+      await Promise.all(promises);
+      return Logging.info("Successfully dumped cache to " + cacheDirPath);
     }
-
-
-    return Logging.
+    catch (raw_exn){
+      var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn);
+      return Logging.errorWithExn(Internal.prettifyExn(exn$1), "Failed to dump cache.");
     }
-    var psqlExec$1 = psqlExec._0;
-    Logging.info("Dumping cache: " + cacheTableInfo.map(function (param) {
-      return param.table_name + " (" + String(param.count) + " rows)";
-    }).join(", "));
-    var promises = cacheTableInfo.map(async function (param) {
-      var tableName = param.table_name;
-      var cacheName = tableName.slice(cacheTablePrefixLength);
-      var outputFile = Path.join(cacheDirPath, cacheName + ".tsv");
-      var command = psqlExec$1 + " -h " + pgHost + " -U " + pgUser + " -d " + pgDatabase + " -c 'COPY \"" + pgSchema + "\".\"" + tableName + "\" TO STDOUT WITH (FORMAT text, HEADER);' > " + outputFile;
-      return new Promise((function (resolve, reject) {
-        Child_process.exec(command, psqlExecOptions, (function (error, stdout, param) {
-          if (error === null) {
-            return resolve(stdout);
-          } else {
-            return reject(error);
-          }
-        }));
-      }));
-    });
-    await Promise.all(promises);
-    return Logging.info("Successfully dumped cache to " + cacheDirPath);
   };
   var restoreEffectCache = async function (withUpload) {
     if (withUpload) {
+      var nothingToUploadErrorMessage = "Nothing to upload.";
      var match = await Promise.all([
-        Fs.promises.readdir(cacheDirPath)
-
+        $$Promise.$$catch(Fs.promises.readdir(cacheDirPath).then(function (e) {
+          return {
+            TAG: "Ok",
+            _0: e
+          };
+        }), (function (param) {
+          return Promise.resolve({
+            TAG: "Error",
+            _0: nothingToUploadErrorMessage
+          });
+        })),
+        getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort)
      ]);
-      var 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      var exit = 0;
+      var message;
+      var entries = match[0];
+      if (entries.TAG === "Ok") {
+        var psqlExec = match[1];
+        if (psqlExec.TAG === "Ok") {
+          var psqlExec$1 = psqlExec._0;
+          var cacheFiles = entries._0.filter(function (entry) {
+            return entry.endsWith(".tsv");
+          });
+          await Promise.all(cacheFiles.map(function (entry) {
+            var cacheName = entry.slice(0, -4);
+            var tableName = cacheTablePrefix + cacheName;
+            var table = Table.mkTable(tableName, [], [
+              Table.mkField("id", "TEXT", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
+              Table.mkField("output", "JSONB", S$RescriptSchema.json(false), undefined, undefined, undefined, undefined, undefined, undefined)
+            ]);
+            return sql.unsafe(makeCreateTableQuery(table, pgSchema)).then(function () {
+              var inputFile = Path.join(cacheDirPath, entry);
+              var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + tableName + "\" FROM STDIN WITH (FORMAT text, HEADER);' < " + inputFile;
+              return new Promise((function (resolve, reject) {
+                Child_process.exec(command, psqlExecOptions, (function (error, stdout, param) {
+                  if (error === null) {
+                    return resolve(stdout);
+                  } else {
+                    return reject(error);
+                  }
+                }));
+              }));
+            });
+          }));
+          Logging.info("Successfully uploaded cache.");
+        } else {
+          message = match[1]._0;
+          exit = 1;
+        }
      } else {
-
+        message = entries._0;
+        exit = 1;
      }
+      if (exit === 1) {
+        if (message === nothingToUploadErrorMessage) {
+          Logging.info("No cache found to upload.");
+        } else {
+          Logging.error("Failed to upload cache, continuing without it. " + message);
+        }
+      }
+
     }
     var cacheTableInfo = await sql.unsafe(makeSchemaCacheTableInfoQuery(pgSchema));
-    if (withUpload && onNewTables !== undefined) {
+    if (withUpload && Utils.$$Array.notEmpty(cacheTableInfo) && onNewTables !== undefined) {
      await onNewTables(cacheTableInfo.map(function (info) {
        return info.table_name;
      }));
@@ -616,7 +657,6 @@ exports.makeSchemaTableNamesQuery = makeSchemaTableNamesQuery;
 exports.cacheTablePrefix = cacheTablePrefix;
 exports.cacheTablePrefixLength = cacheTablePrefixLength;
 exports.makeSchemaCacheTableInfoQuery = makeSchemaCacheTableInfoQuery;
-exports.getPsqlExec = getPsqlExec;
-exports.psqlExecMissingErrorMessage = psqlExecMissingErrorMessage;
+exports.getConnectedPsqlExec = getConnectedPsqlExec;
 exports.make = make;
 /* pgEncodingErrorSchema Not a pure module */
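One compiled-output detail carries the reason for the `("PATH", process.env.PATH)` change above: `child_process.exec` does not merge a supplied `env` with the parent environment, it replaces it, so a `PGPASSWORD`-only env left the spawned shell unable to find `psql` or `docker-compose` by name. A minimal illustration:

import { exec } from "node:child_process";

// With `env` set, the child sees ONLY these variables; PATH must be
// forwarded explicitly or lookup of the binary by name fails.
exec(
  "psql --version",
  { env: { PGPASSWORD: "secret", PATH: process.env.PATH } },
  (error, stdout) => {
    if (error) console.error("psql not found or failed:", error.message);
    else console.log(stdout.trim());
  },
);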