envio 2.26.0-alpha.7 → 2.26.0-alpha.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +21 -2
- package/index.js +0 -1
- package/package.json +5 -5
- package/src/Envio.res +0 -10
- package/src/Envio.res.js +0 -7
- package/src/Internal.res +0 -2
- package/src/Persistence.res +58 -94
- package/src/Persistence.res.js +15 -49
- package/src/PgStorage.res +26 -329
- package/src/PgStorage.res.js +20 -250
- package/src/Prometheus.res +0 -12
- package/src/Prometheus.res.js +0 -12
- package/src/Utils.res +9 -22
- package/src/Utils.res.js +6 -17
- package/src/bindings/Express.res +1 -1
- package/src/bindings/NodeJs.res +26 -27
- package/src/bindings/NodeJs.res.js +13 -5
package/src/PgStorage.res
CHANGED
@@ -58,7 +58,7 @@ let makeInitializeTransaction = (
   ~generalTables=[],
   ~entities=[],
   ~enums=[],
-  ~
+  ~reuseExistingPgSchema=false,
 ) => {
   let allTables = generalTables->Array.copy
   let allEntityTables = []
@@ -71,11 +71,10 @@ let makeInitializeTransaction = (
 
   let query = ref(
     (
-
+      reuseExistingPgSchema
         // Hosted Service already have a DB with the created public schema
         // It also doesn't allow to simply drop it,
-        // so we reuse
-        // (but only for public, since it's usually always exists)
+        // so we reuse an existing schema when it's empty (our case)
         ? ""
         : `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
CREATE SCHEMA "${pgSchema}";\n`
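
Note: `reuseExistingPgSchema` defaults to `false`, so existing callers keep the drop-and-recreate behavior; when it is `true`, the `DROP SCHEMA ... CASCADE; CREATE SCHEMA ...` prefix becomes an empty string and the generated transaction starts directly with the table DDL. A minimal sketch of the two paths — the call sites are hypothetical, and it assumes `~pgSchema` is the only other required labeled argument, which the hunk bodies suggest but do not prove:

    // Default path: the transaction begins with
    // DROP SCHEMA IF EXISTS "envio" CASCADE; CREATE SCHEMA "envio";
    let _ = PgStorage.makeInitializeTransaction(~pgSchema="envio")

    // Hosted Service path: the pre-existing (empty) "public" schema is
    // kept, and the transaction starts directly with the table DDL.
    let _ = PgStorage.makeInitializeTransaction(
      ~pgSchema="public",
      ~reuseExistingPgSchema=true,
    )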
@@ -129,20 +128,6 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   })
 })
 
-  // Add cache row count function
-  functionsQuery :=
-    functionsQuery.contents ++
-    "\n" ++
-    `CREATE OR REPLACE FUNCTION get_cache_row_count(table_name text)
-RETURNS integer AS $$
-DECLARE
-  result integer;
-BEGIN
-  EXECUTE format('SELECT COUNT(*) FROM "${pgSchema}".%I', table_name) INTO result;
-  RETURN result;
-END;
-$$ LANGUAGE plpgsql;`
-
   [query.contents]->Js.Array2.concat(
     functionsQuery.contents !== "" ? [functionsQuery.contents] : [],
   )
@@ -387,7 +372,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
     raise(
       Persistence.StorageError({
         message: `Failed to insert items into table "${table.tableName}"`,
-        reason: exn
+        reason: exn,
       }),
     )
   }
@@ -433,110 +418,7 @@ let makeSchemaTableNamesQuery = (~pgSchema) => {
   `SELECT table_name FROM information_schema.tables WHERE table_schema = '${pgSchema}';`
 }
 
-let 
-let cacheTablePrefixLength = cacheTablePrefix->String.length
-
-type schemaCacheTableInfo = {
-  @as("table_name")
-  tableName: string,
-  @as("count")
-  count: int,
-}
-
-let makeSchemaCacheTableInfoQuery = (~pgSchema) => {
-  `SELECT
-    t.table_name,
-    get_cache_row_count(t.table_name) as count
-  FROM information_schema.tables t
-  WHERE t.table_schema = '${pgSchema}'
-    AND t.table_name LIKE '${cacheTablePrefix}%';`
-}
-
-type psqlExecState =
-  Unknown | Pending(promise<result<string, string>>) | Resolved(result<string, string>)
-
-let getConnectedPsqlExec = {
-  let pgDockerServiceName = "envio-postgres"
-  // Should use the default port, since we're executing the command
-  // from the postgres container's network.
-  let pgDockerServicePort = 5432
-
-  // For development: We run the indexer process locally,
-  // and there might not be psql installed on the user's machine.
-  // So we use docker-compose to run psql existing in the postgres container.
-  // For production: We expect indexer to be running in a container,
-  // with psql installed. So we can call it directly.
-  let psqlExecState = ref(Unknown)
-  async (~pgUser, ~pgHost, ~pgDatabase, ~pgPort) => {
-    switch psqlExecState.contents {
-    | Unknown => {
-        let promise = Promise.make((resolve, _reject) => {
-          let binary = "psql"
-          NodeJs.ChildProcess.exec(`${binary} --version`, (~error, ~stdout as _, ~stderr as _) => {
-            switch error {
-            | Value(_) => {
-                let binary = `docker-compose exec -T -u ${pgUser} ${pgDockerServiceName} psql`
-                NodeJs.ChildProcess.exec(
-                  `${binary} --version`,
-                  (~error, ~stdout as _, ~stderr as _) => {
-                    switch error {
-                    | Value(_) =>
-                      resolve(
-                        Error(`Please check if "psql" binary is installed or docker-compose is running for the local indexer.`),
-                      )
-                    | Null =>
-                      resolve(
-                        Ok(
-                          `${binary} -h ${pgHost} -p ${pgDockerServicePort->Js.Int.toString} -U ${pgUser} -d ${pgDatabase}`,
-                        ),
-                      )
-                    }
-                  },
-                )
-              }
-            | Null =>
-              resolve(
-                Ok(
-                  `${binary} -h ${pgHost} -p ${pgPort->Js.Int.toString} -U ${pgUser} -d ${pgDatabase}`,
-                ),
-              )
-            }
-          })
-        })
-
-        psqlExecState := Pending(promise)
-        let result = await promise
-        psqlExecState := Resolved(result)
-        result
-      }
-    | Pending(promise) => await promise
-    | Resolved(result) => result
-    }
-  }
-}
-
-let make = (
-  ~sql: Postgres.sql,
-  ~pgHost,
-  ~pgSchema,
-  ~pgPort,
-  ~pgUser,
-  ~pgDatabase,
-  ~pgPassword,
-  ~onInitialize=?,
-  ~onNewTables=?,
-): Persistence.storage => {
-  let psqlExecOptions: NodeJs.ChildProcess.execOptions = {
-    env: Js.Dict.fromArray([("PGPASSWORD", pgPassword), ("PATH", %raw(`process.env.PATH`))]),
-  }
-
-  let cacheDirPath = NodeJs.Path.resolve([
-    // Right outside of the generated directory
-    "..",
-    ".envio",
-    "cache",
-  ])
-
+let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
   let isInitialized = async () => {
     let envioTables =
       await sql->Postgres.unsafe(
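
Note: with the psql-based cache dump/restore gone, `make` no longer shells out to `psql` or `docker-compose`, so the connection details (`~pgHost`, `~pgPort`, `~pgDatabase`, `~pgPassword`) and the `~onInitialize`/`~onNewTables` hooks drop out of its signature. A sketch of constructing storage against the new signature — the surrounding setup is hypothetical; only the three labeled arguments come from the diff:

    // `sql` is assumed to be an already-configured Postgres.sql handle
    // from the indexer's setup code; the string values are illustrative.
    let storage: Persistence.storage = PgStorage.make(
      ~sql,
      ~pgSchema="public",
      ~pgUser="postgres",
    )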
@@ -571,17 +453,32 @@ let make = (
       ~generalTables,
       ~entities,
       ~enums,
-      ~
+      ~reuseExistingPgSchema=schemaTableNames->Utils.Array.isEmpty,
     )
     // Execute all queries within a single transaction for integrity
     let _ = await sql->Postgres.beginSql(sql => {
       queries->Js.Array2.map(query => sql->Postgres.unsafe(query))
     })
+  }
 
-
-
-
-
+  let loadEffectCaches = async () => {
+    let schemaTableNames: array<schemaTableName> =
+      await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
+    schemaTableNames->Belt.Array.keepMapU(schemaTableName => {
+      if schemaTableName.tableName->Js.String2.startsWith("effect_cache_") {
+        Some(
+          (
+            {
+              name: schemaTableName.tableName,
+              size: 0,
+              table: None,
+            }: Persistence.effectCache
+          ),
+        )
+      } else {
+        None
+      }
+    })
   }
 
   let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
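
Note: cache discovery is now metadata-only. `loadEffectCaches` lists the schema's tables once and keeps those whose names start with `effect_cache_`, returning `Persistence.effectCache` records with `size: 0` and `table: None` instead of counting rows — which is why the `get_cache_row_count` SQL function removed above is no longer needed. A hypothetical consumer, assuming `storage` was built as sketched earlier:

    // Log which effect cache tables were discovered at startup.
    // `cache.size` is always 0 here: rows are no longer counted up front.
    let logEffectCaches = async (storage: Persistence.storage) => {
      let caches = await storage.loadEffectCaches()
      caches->Js.Array2.forEach(cache => Js.log2(cache.name, cache.size))
    }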
@@ -680,212 +577,12 @@ let make = (
     )
   }
 
-  let setEffectCacheOrThrow = async (
-    ~effectName: string,
-    ~ids: array<string>,
-    ~outputs: array<Internal.effectOutput>,
-    ~outputSchema: S.t<Internal.effectOutput>,
-    ~initialize: bool,
-  ) => {
-    let table = Table.mkTable(
-      cacheTablePrefix ++ effectName,
-      ~fields=[
-        Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true),
-        Table.mkField("output", JsonB, ~fieldSchema=S.json(~validate=false)),
-      ],
-      ~compositeIndices=[],
-    )
-
-    if initialize {
-      let _ = await sql->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
-      switch onNewTables {
-      | Some(onNewTables) => await onNewTables(~tableNames=[table.tableName])
-      | None => ()
-      }
-    }
-
-    let items = []
-    for idx in 0 to outputs->Array.length - 1 {
-      items
-      ->Js.Array2.push({
-        "id": ids[idx],
-        "output": outputs[idx],
-      })
-      ->ignore
-    }
-
-    await setOrThrow(
-      ~items,
-      ~table,
-      ~itemSchema=S.schema(s =>
-        {
-          "id": s.matches(S.string),
-          "output": s.matches(outputSchema),
-        }
-      ),
-    )
-  }
-
-  let dumpEffectCache = async () => {
-    try {
-      let cacheTableInfo: array<schemaCacheTableInfo> =
-        (await sql
-        ->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema)))
-        ->Js.Array2.filter(i => i.count > 0)
-
-      if cacheTableInfo->Utils.Array.notEmpty {
-        // Create .envio/cache directory if it doesn't exist
-        try {
-          await NodeJs.Fs.Promises.access(cacheDirPath)
-        } catch {
-        | _ =>
-          // Create directory if it doesn't exist
-          await NodeJs.Fs.Promises.mkdir(~path=cacheDirPath, ~options={recursive: true})
-        }
-
-        // Command for testing. Run from generated
-        // docker-compose exec -T -u postgres envio-postgres psql -d envio-dev -c 'COPY "public"."envio_effect_getTokenMetadata" TO STDOUT (FORMAT text, HEADER);' > ../.envio/cache/getTokenMetadata.tsv
-
-        switch await getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort) {
-        | Ok(psqlExec) => {
-            Logging.info(
-              `Dumping cache: ${cacheTableInfo
-                ->Js.Array2.map(({tableName, count}) =>
-                  tableName ++ " (" ++ count->Belt.Int.toString ++ " rows)"
-                )
-                ->Js.Array2.joinWith(", ")}`,
-            )
-
-            let promises = cacheTableInfo->Js.Array2.map(async ({tableName}) => {
-              let cacheName = tableName->Js.String2.sliceToEnd(~from=cacheTablePrefixLength)
-              let outputFile =
-                NodeJs.Path.join(cacheDirPath, cacheName ++ ".tsv")->NodeJs.Path.toString
-
-              let command = `${psqlExec} -c 'COPY "${pgSchema}"."${tableName}" TO STDOUT WITH (FORMAT text, HEADER);' > ${outputFile}`
-
-              Promise.make((resolve, reject) => {
-                NodeJs.ChildProcess.execWithOptions(
-                  command,
-                  psqlExecOptions,
-                  (~error, ~stdout, ~stderr as _) => {
-                    switch error {
-                    | Value(error) => reject(error)
-                    | Null => resolve(stdout)
-                    }
-                  },
-                )
-              })
-            })
-
-            let _ = await promises->Promise.all
-            Logging.info(`Successfully dumped cache to ${cacheDirPath->NodeJs.Path.toString}`)
-          }
-        | Error(message) => Logging.error(`Failed to dump cache. ${message}`)
-        }
-      }
-    } catch {
-    | exn => Logging.errorWithExn(exn->Internal.prettifyExn, `Failed to dump cache.`)
-    }
-  }
-
-  let restoreEffectCache = async (~withUpload) => {
-    if withUpload {
-      // Try to restore cache tables from binary files
-      let nothingToUploadErrorMessage = "Nothing to upload."
-
-      switch await Promise.all2((
-        NodeJs.Fs.Promises.readdir(cacheDirPath)
-        ->Promise.thenResolve(e => Ok(e))
-        ->Promise.catch(_ => Promise.resolve(Error(nothingToUploadErrorMessage))),
-        getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort),
-      )) {
-      | (Ok(entries), Ok(psqlExec)) => {
-          let cacheFiles = entries->Js.Array2.filter(entry => {
-            entry->Js.String2.endsWith(".tsv")
-          })
-
-          let _ =
-            await cacheFiles
-            ->Js.Array2.map(entry => {
-              let cacheName = entry->Js.String2.slice(~from=0, ~to_=-4) // Remove .tsv extension
-              let tableName = cacheTablePrefix ++ cacheName
-              let table = Table.mkTable(
-                tableName,
-                ~fields=[
-                  Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true),
-                  Table.mkField("output", JsonB, ~fieldSchema=S.json(~validate=false)),
-                ],
-                ~compositeIndices=[],
-              )
-
-              sql
-              ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
-              ->Promise.then(() => {
-                let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
-
-                let command = `${psqlExec} -c 'COPY "${pgSchema}"."${tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
-
-                Promise.make(
-                  (resolve, reject) => {
-                    NodeJs.ChildProcess.execWithOptions(
-                      command,
-                      psqlExecOptions,
-                      (~error, ~stdout, ~stderr as _) => {
-                        switch error {
-                        | Value(error) => reject(error)
-                        | Null => resolve(stdout)
-                        }
-                      },
-                    )
-                  },
-                )
-              })
-            })
-            ->Promise.all
-
-          Logging.info("Successfully uploaded cache.")
-        }
-      | (Error(message), _)
-      | (_, Error(message)) =>
-        if message === nothingToUploadErrorMessage {
-          Logging.info("No cache found to upload.")
-        } else {
-          Logging.error(`Failed to upload cache, continuing without it. ${message}`)
-        }
-      }
-    }
-
-    let cacheTableInfo: array<schemaCacheTableInfo> =
-      await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema))
-
-    if withUpload && cacheTableInfo->Utils.Array.notEmpty {
-      switch onNewTables {
-      | Some(onNewTables) =>
-        await onNewTables(
-          ~tableNames=cacheTableInfo->Js.Array2.map(info => {
-            info.tableName
-          }),
-        )
-      | None => ()
-      }
-    }
-
-    cacheTableInfo->Js.Array2.map((info): Persistence.effectCacheRecord => {
-      {
-        effectName: info.tableName->Js.String2.sliceToEnd(~from=cacheTablePrefixLength),
-        count: info.count,
-      }
-    })
-  }
-
   {
     isInitialized,
     initialize,
     loadByFieldOrThrow,
+    loadEffectCaches,
    loadByIdsOrThrow,
     setOrThrow,
-    setEffectCacheOrThrow,
-    dumpEffectCache,
-    restoreEffectCache,
   }
 }