envio 2.32.2 → 3.0.0-alpha-main-clickhouse-sink
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +1 -0
- package/package.json +6 -5
- package/src/Batch.res +4 -4
- package/src/Change.res +9 -0
- package/src/Change.res.js +2 -0
- package/src/Config.res +5 -5
- package/src/Config.res.js +3 -1
- package/src/Envio.gen.ts +3 -3
- package/src/Envio.res +14 -3
- package/src/EventRegister.res +3 -11
- package/src/EventRegister.res.js +4 -8
- package/src/EventRegister.resi +1 -1
- package/src/InMemoryStore.res +7 -15
- package/src/InMemoryStore.res.js +2 -4
- package/src/InMemoryTable.res +50 -35
- package/src/InMemoryTable.res.js +52 -84
- package/src/Internal.gen.ts +0 -2
- package/src/Internal.res +20 -38
- package/src/Internal.res.js +2 -16
- package/src/Persistence.res +190 -38
- package/src/Persistence.res.js +92 -39
- package/src/PgStorage.res +700 -14
- package/src/PgStorage.res.js +431 -19
- package/src/Platform.res +141 -0
- package/src/Platform.res.js +170 -0
- package/src/Prometheus.res +41 -0
- package/src/Prometheus.res.js +45 -0
- package/src/SafeCheckpointTracking.res +5 -4
- package/src/Sink.res +47 -0
- package/src/Sink.res.js +36 -0
- package/src/Utils.res +2 -0
- package/src/Utils.res.js +3 -0
- package/src/bindings/ClickHouse.res +387 -0
- package/src/bindings/ClickHouse.res.js +274 -0
- package/src/bindings/Postgres.res +15 -0
- package/src/db/EntityHistory.res +33 -156
- package/src/db/EntityHistory.res.js +40 -115
- package/src/db/InternalTable.res +56 -55
- package/src/db/InternalTable.res.js +49 -52
- package/src/db/Table.res +86 -22
- package/src/db/Table.res.js +77 -10
package/src/PgStorage.res
CHANGED
@@ -32,12 +32,13 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText
     let fieldName = field->Table.getDbFieldName
 
     {
-      `"${fieldName}" ${
-
-
-
-
-
+      `"${fieldName}" ${Table.getPgFieldType(
+        ~fieldType,
+        ~pgSchema,
+        ~isArray,
+        ~isNullable,
+        ~isNumericArrayAsText,
+      )}${switch defaultValue {
       | Some(defaultValue) => ` DEFAULT ${defaultValue}`
       | None => isNullable ? `` : ` NOT NULL`
       }}`
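For illustration, the rendered column definition for a non-nullable field without a default would look like:

  "id" TEXT NOT NULL

and for a field with a default value:

  "count" INTEGER DEFAULT 0

(The concrete type strings come from Table.getPgFieldType, which is not shown in this diff; TEXT and INTEGER here are assumptions.)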
@@ -56,6 +57,73 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText
       : ""});`
 }
 
+let getEntityHistory = (~entityConfig: Internal.entityConfig): EntityHistory.pgEntityHistory<
+  'entity,
+> => {
+  switch entityConfig.pgEntityHistoryCache {
+  | Some(cache) => cache
+  | None =>
+    let cache = {
+      let id = "id"
+
+      let dataFields = entityConfig.table.fields->Belt.Array.keepMap(field =>
+        switch field {
+        | Field(field) =>
+          switch field.fieldName {
+          //id is not nullable and should be part of the pk
+          | "id" => {...field, fieldName: id, isPrimaryKey: true}->Table.Field->Some
+          | _ =>
+            {
+              ...field,
+              isNullable: true, //All entity fields are nullable in the case
+              isIndex: false, //No need to index any additional entity data fields in entity history
+            }
+            ->Field
+            ->Some
+          }
+
+        | DerivedFrom(_) => None
+        }
+      )
+
+      let actionField = Table.mkField(
+        EntityHistory.changeFieldName,
+        EntityHistory.changeFieldType,
+        ~fieldSchema=S.never,
+      )
+
+      let checkpointIdField = Table.mkField(
+        EntityHistory.checkpointIdFieldName,
+        EntityHistory.checkpointIdFieldType,
+        ~fieldSchema=EntityHistory.unsafeCheckpointIdSchema,
+        ~isPrimaryKey=true,
+      )
+
+      let entityTableName = entityConfig.table.tableName
+      let historyTableName = EntityHistory.historyTableName(
+        ~entityName=entityTableName,
+        ~entityIndex=entityConfig.index,
+      )
+      //ignore composite indices
+      let table = Table.mkTable(
+        historyTableName,
+        ~fields=dataFields->Belt.Array.concat([checkpointIdField, actionField]),
+      )
+
+      let setChangeSchema = EntityHistory.makeSetUpdateSchema(entityConfig.schema)
+
+      {
+        EntityHistory.table,
+        setChangeSchema,
+        setChangeSchemaRows: S.array(setChangeSchema),
+      }
+    }
+
+    entityConfig.pgEntityHistoryCache = Some(cache)
+    cache
+  }
+}
+
 let makeInitializeTransaction = (
   ~pgSchema,
   ~pgUser,
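Putting the field transformations above together, the derived history table for a hypothetical entity User(id, name) would have roughly this shape. The "envio_history_" prefix and the "envio_checkpoint_id" column name are confirmed elsewhere in this diff; the schema name, the exact table name (which may also encode the entity index), the action column's name, and the concrete column types are assumptions:

  CREATE TABLE "public"."envio_history_User" (
    "id" TEXT NOT NULL,                      -- entity id, forced to be part of the pk
    "name" TEXT,                             -- data fields become nullable and unindexed
    "envio_checkpoint_id" INTEGER NOT NULL,  -- second pk component
    "envio_change" TEXT,                     -- assumed name for the action field
    PRIMARY KEY ("id", "envio_checkpoint_id")
  );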
@@ -74,10 +142,10 @@ let makeInitializeTransaction = (
 
   let allTables = generalTables->Array.copy
   let allEntityTables = []
-  entities->Js.Array2.forEach((
-    allEntityTables->Js.Array2.push(
-    allTables->Js.Array2.push(
-    allTables->Js.Array2.push(
+  entities->Js.Array2.forEach((entityConfig: Internal.entityConfig) => {
+    allEntityTables->Js.Array2.push(entityConfig.table)->ignore
+    allTables->Js.Array2.push(entityConfig.table)->ignore
+    allTables->Js.Array2.push(getEntityHistory(~entityConfig).table)->ignore
   })
   let derivedSchema = Schema.make(allEntityTables)
 
@@ -97,10 +165,10 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   )
 
   // Optimized enum creation - direct when cleanRun, conditional otherwise
-  enums->Js.Array2.forEach((enumConfig:
+  enums->Js.Array2.forEach((enumConfig: Table.enumConfig<Table.enum>) => {
     // Create base enum creation query once
     let enumCreateQuery = `CREATE TYPE "${pgSchema}".${enumConfig.name} AS ENUM(${enumConfig.variants
-    ->Js.Array2.map(v => `'${v->(Utils.magic:
+    ->Js.Array2.map(v => `'${v->(Utils.magic: Table.enum => string)}'`)
     ->Js.Array2.joinWith(", ")});`
 
     query := query.contents ++ "\n" ++ enumCreateQuery
@@ -176,6 +244,14 @@ let makeLoadByIdsQuery = (~pgSchema, ~tableName) => {
   `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
 }
 
+let makeDeleteByIdQuery = (~pgSchema, ~tableName) => {
+  `DELETE FROM "${pgSchema}"."${tableName}" WHERE id = $1;`
+}
+
+let makeDeleteByIdsQuery = (~pgSchema, ~tableName) => {
+  `DELETE FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
+}
+
 let makeLoadAllQuery = (~pgSchema, ~tableName) => {
   `SELECT * FROM "${pgSchema}"."${tableName}";`
 }
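Instantiated for a hypothetical "User" table in a "public" schema (both names are placeholders for illustration), the two new builders produce:

  DELETE FROM "public"."User" WHERE id = $1;
  DELETE FROM "public"."User" WHERE id = ANY($1::text[]);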
@@ -505,6 +581,463 @@ let getConnectedPsqlExec = {
   }
 }
 
+let deleteByIdsOrThrow = async (sql, ~pgSchema, ~ids, ~table: Table.table) => {
+  switch await (
+    switch ids {
+    | [_] =>
+      sql->Postgres.preparedUnsafe(
+        makeDeleteByIdQuery(~pgSchema, ~tableName=table.tableName),
+        ids->Obj.magic,
+      )
+    | _ =>
+      sql->Postgres.preparedUnsafe(
+        makeDeleteByIdsQuery(~pgSchema, ~tableName=table.tableName),
+        [ids]->Obj.magic,
+      )
+    }
+  ) {
+  | exception exn =>
+    raise(
+      Persistence.StorageError({
+        message: `Failed deleting "${table.tableName}" from storage by ids`,
+        reason: exn,
+      }),
+    )
+  | _ => ()
+  }
+}
+
+let makeInsertDeleteUpdatesQuery = (~entityConfig: Internal.entityConfig, ~pgSchema) => {
+  let historyTableName = EntityHistory.historyTableName(
+    ~entityName=entityConfig.name,
+    ~entityIndex=entityConfig.index,
+  )
+
+  // Get all field names for the INSERT statement
+  let allHistoryFieldNames = entityConfig.table.fields->Belt.Array.keepMap(fieldOrDerived =>
+    switch fieldOrDerived {
+    | Field(field) => field->Table.getDbFieldName->Some
+    | DerivedFrom(_) => None
+    }
+  )
+  allHistoryFieldNames->Js.Array2.push(EntityHistory.checkpointIdFieldName)->ignore
+  allHistoryFieldNames->Js.Array2.push(EntityHistory.changeFieldName)->ignore
+
+  let allHistoryFieldNamesStr =
+    allHistoryFieldNames->Belt.Array.map(name => `"${name}"`)->Js.Array2.joinWith(", ")
+
+  // Build the SELECT part: id from unnest, envio_checkpoint_id from unnest, 'DELETE' for action, NULL for all other fields
+  let selectParts = allHistoryFieldNames->Belt.Array.map(fieldName => {
+    switch fieldName {
+    | field if field == Table.idFieldName => `u.${Table.idFieldName}`
+    | field if field == EntityHistory.checkpointIdFieldName =>
+      `u.${EntityHistory.checkpointIdFieldName}`
+    | field if field == EntityHistory.changeFieldName =>
+      `'${(EntityHistory.RowAction.DELETE :> string)}'`
+    | _ => "NULL"
+    }
+  })
+  let selectPartsStr = selectParts->Js.Array2.joinWith(", ")
+
+  // Get the PostgreSQL type for the checkpoint ID field
+  let checkpointIdPgType = Table.getPgFieldType(
+    ~fieldType=EntityHistory.checkpointIdFieldType,
+    ~pgSchema,
+    ~isArray=false,
+    ~isNumericArrayAsText=false,
+    ~isNullable=false,
+  )
+
+  `INSERT INTO "${pgSchema}"."${historyTableName}" (${allHistoryFieldNamesStr})
+  SELECT ${selectPartsStr}
+  FROM UNNEST($1::text[], $2::${checkpointIdPgType}[]) AS u(${Table.idFieldName}, ${EntityHistory.checkpointIdFieldName})`
+}
+
+let executeSet = (
+  sql: Postgres.sql,
+  ~items: array<'a>,
+  ~dbFunction: (Postgres.sql, array<'a>) => promise<unit>,
+) => {
+  if items->Array.length > 0 {
+    sql->dbFunction(items)
+  } else {
+    Promise.resolve()
+  }
+}
+
+let rec writeBatch = async (
+  sql,
+  ~batch: Batch.t,
+  ~rawEvents,
+  ~pgSchema,
+  ~rollbackTargetCheckpointId,
+  ~isInReorgThreshold,
+  ~config: Config.t,
+  ~allEntities: array<Internal.entityConfig>,
+  ~setEffectCacheOrThrow,
+  ~updatedEffectsCache,
+  ~updatedEntities: array<Persistence.updatedEntity>,
+  ~sinkPromise: option<promise<option<exn>>>,
+  ~escapeTables=?,
+) => {
+  try {
+    let shouldSaveHistory = config->Config.shouldSaveHistory(~isInReorgThreshold)
+
+    let specificError = ref(None)
+
+    let setRawEvents = executeSet(
+      _,
+      ~dbFunction=(sql, items) => {
+        sql->setOrThrow(
+          ~items,
+          ~table=InternalTable.RawEvents.table,
+          ~itemSchema=InternalTable.RawEvents.schema,
+          ~pgSchema,
+        )
+      },
+      ~items=rawEvents,
+    )
+
+    let setEntities = updatedEntities->Belt.Array.map(({entityConfig, updates}) => {
+      let entitiesToSet = []
+      let idsToDelete = []
+
+      updates->Js.Array2.forEach(row => {
+        switch row {
+        | {latestChange: Set({entity})} => entitiesToSet->Belt.Array.push(entity)
+        | {latestChange: Delete({entityId})} => idsToDelete->Belt.Array.push(entityId)
+        }
+      })
+
+      let shouldRemoveInvalidUtf8 = switch escapeTables {
+      | Some(tables) if tables->Utils.Set.has(entityConfig.table) => true
+      | _ => false
+      }
+
+      async sql => {
+        try {
+          let promises = []
+
+          if shouldSaveHistory {
+            let backfillHistoryIds = Utils.Set.make()
+            let batchSetUpdates = []
+            // Use unnest approach
+            let batchDeleteCheckpointIds = []
+            let batchDeleteEntityIds = []
+
+            updates->Js.Array2.forEach(update => {
+              switch update {
+              | {history, containsRollbackDiffChange} =>
+                history->Js.Array2.forEach(
+                  (change: Change.t<'a>) => {
+                    if !containsRollbackDiffChange {
+                      // For every update we want to make sure that there's an existing history item
+                      // with the current entity state. So we backfill history with checkpoint id 0,
+                      // before writing updates. Don't do this if the update has a rollback diff change.
+                      backfillHistoryIds->Utils.Set.add(change->Change.getEntityId)->ignore
+                    }
+                    switch change {
+                    | Delete({entityId}) => {
+                        batchDeleteEntityIds->Belt.Array.push(entityId)->ignore
+                        batchDeleteCheckpointIds
+                        ->Belt.Array.push(change->Change.getCheckpointId)
+                        ->ignore
+                      }
+                    | Set(_) => batchSetUpdates->Js.Array2.push(change)->ignore
+                    }
+                  },
+                )
+              }
+            })
+
+            if backfillHistoryIds->Utils.Set.size !== 0 {
+              // This must run before updating entity or entity history tables
+              await EntityHistory.backfillHistory(
+                sql,
+                ~pgSchema,
+                ~entityName=entityConfig.name,
+                ~entityIndex=entityConfig.index,
+                ~ids=backfillHistoryIds->Utils.Set.toArray,
+              )
+            }
+
+            if batchDeleteCheckpointIds->Utils.Array.notEmpty {
+              promises->Belt.Array.push(
+                sql
+                ->Postgres.preparedUnsafe(
+                  makeInsertDeleteUpdatesQuery(~entityConfig, ~pgSchema),
+                  (batchDeleteEntityIds, batchDeleteCheckpointIds)->Obj.magic,
+                )
+                ->Promise.ignoreValue,
+              )
+            }
+
+            if batchSetUpdates->Utils.Array.notEmpty {
+              if shouldRemoveInvalidUtf8 {
+                let entities = batchSetUpdates->Js.Array2.map(batchSetUpdate => {
+                  switch batchSetUpdate {
+                  | Set({entity}) => entity
+                  | _ => Js.Exn.raiseError("Expected Set action")
+                  }
+                })
+                entities->removeInvalidUtf8InPlace
+              }
+
+              let entityHistory = getEntityHistory(~entityConfig)
+
+              promises
+              ->Js.Array2.push(
+                sql->setOrThrow(
+                  ~items=batchSetUpdates,
+                  ~itemSchema=entityHistory.setChangeSchema,
+                  ~table=entityHistory.table,
+                  ~pgSchema,
+                ),
+              )
+              ->ignore
+            }
+          }
+
+          if entitiesToSet->Utils.Array.notEmpty {
+            if shouldRemoveInvalidUtf8 {
+              entitiesToSet->removeInvalidUtf8InPlace
+            }
+            promises->Belt.Array.push(
+              sql->setOrThrow(
+                ~items=entitiesToSet,
+                ~table=entityConfig.table,
+                ~itemSchema=entityConfig.schema,
+                ~pgSchema,
+              ),
+            )
+          }
+          if idsToDelete->Utils.Array.notEmpty {
+            promises->Belt.Array.push(
+              sql->deleteByIdsOrThrow(~pgSchema, ~ids=idsToDelete, ~table=entityConfig.table),
+            )
+          }
+
+          let _ = await promises->Promise.all
+        } catch {
+        // There's a race condition that sql->Postgres.beginSql
+        // might throw PG error, earlier, than the handled error
+        // from setOrThrow will be passed through.
+        // This is needed for the utf8 encoding fix.
+        | exn => {
+            /* Note: Entity History doesn't return StorageError yet, and directly throws JsError */
+            let normalizedExn = switch exn {
+            | JsError(_) => exn
+            | Persistence.StorageError({reason: exn}) => exn
+            | _ => exn
+            }->Js.Exn.anyToExnInternal
+
+            switch normalizedExn {
+            | JsError(error) =>
+              // Workaround for https://github.com/enviodev/hyperindex/issues/446
+              // We do escaping only when we actually got an error writing for the first time.
+              // This is not perfect, but an optimization to avoid escaping for every single item.
+
+              switch error->S.parseOrThrow(pgErrorMessageSchema) {
+              | `current transaction is aborted, commands ignored until end of transaction block` => ()
+              | `invalid byte sequence for encoding "UTF8": 0x00` =>
+                // Since the transaction is aborted at this point,
+                // we can't simply retry the function with escaped items,
+                // so propagate the error, to restart the whole batch write.
+                // Also, pass the failing table, to escape only its items.
+                // TODO: Ideally all this should be done in the file,
+                // so it'll be easier to work on PG specific logic.
+                specificError.contents = Some(PgEncodingError({table: entityConfig.table}))
+              | _ => specificError.contents = Some(exn->Utils.prettifyExn)
+              | exception _ => ()
+              }
+            | S.Raised(_) => raise(normalizedExn) // But rethrow this one, since it's not a PG error
+            | _ => ()
+            }
+
+            // Important: Don't rethrow here, since it'll result in
+            // an unhandled rejected promise error.
+            // That's fine not to throw, since sql->Postgres.beginSql
+            // will fail anyways.
+          }
+        }
+      }
+    })
+
+    //In the event of a rollback, rollback all meta tables based on the given
+    //valid event identifier, where all rows created after this eventIdentifier should
+    //be deleted
+    let rollbackTables = switch rollbackTargetCheckpointId {
+    | Some(rollbackTargetCheckpointId) =>
+      Some(
+        sql => {
+          let promises = allEntities->Js.Array2.map(entityConfig => {
+            sql->EntityHistory.rollback(
+              ~pgSchema,
+              ~entityName=entityConfig.name,
+              ~entityIndex=entityConfig.index,
+              ~rollbackTargetCheckpointId,
+            )
+          })
+          promises
+          ->Js.Array2.push(
+            sql->InternalTable.Checkpoints.rollback(~pgSchema, ~rollbackTargetCheckpointId),
+          )
+          ->ignore
+          Promise.all(promises)
+        },
+      )
+    | None => None
+    }
+
+    try {
+      let _ = await Promise.all2((
+        sql->Postgres.beginSql(async sql => {
+          //Rollback tables need to happen first in the transaction
+          switch rollbackTables {
+          | Some(rollbackTables) =>
+            let _ = await rollbackTables(sql)
+          | None => ()
+          }
+
+          let setOperations = [
+            sql =>
+              sql->InternalTable.Chains.setProgressedChains(
+                ~pgSchema,
+                ~progressedChains=batch.progressedChainsById->Utils.Dict.mapValuesToArray((
+                  chainAfterBatch
+                ): InternalTable.Chains.progressedChain => {
+                  chainId: chainAfterBatch.fetchState.chainId,
+                  progressBlockNumber: chainAfterBatch.progressBlockNumber,
+                  totalEventsProcessed: chainAfterBatch.totalEventsProcessed,
+                }),
+              ),
+            setRawEvents,
+          ]->Belt.Array.concat(setEntities)
+
+          if shouldSaveHistory {
+            setOperations->Belt.Array.push(sql =>
+              sql->InternalTable.Checkpoints.insert(
+                ~pgSchema,
+                ~checkpointIds=batch.checkpointIds,
+                ~checkpointChainIds=batch.checkpointChainIds,
+                ~checkpointBlockNumbers=batch.checkpointBlockNumbers,
+                ~checkpointBlockHashes=batch.checkpointBlockHashes,
+                ~checkpointEventsProcessed=batch.checkpointEventsProcessed,
+              )
+            )
+          }
+
+          await setOperations
+          ->Belt.Array.map(dbFunc => sql->dbFunc)
+          ->Promise.all
+          ->Promise.ignoreValue
+
+          switch sinkPromise {
+          | Some(sinkPromise) =>
+            switch await sinkPromise {
+            | Some(exn) => raise(exn)
+            | None => ()
+            }
+          | None => ()
+          }
+        }),
+        // Since effect cache currently doesn't support rollback,
+        // we can run it outside of the transaction for simplicity.
+        updatedEffectsCache
+        ->Belt.Array.map(({effect, items, shouldInitialize}: Persistence.updatedEffectCache) => {
+          setEffectCacheOrThrow(~effect, ~items, ~initialize=shouldInitialize)
+        })
+        ->Promise.all,
+      ))
+
+      // Just in case, if there's a not PG-specific error.
+      switch specificError.contents {
+      | Some(specificError) => raise(specificError)
+      | None => ()
+      }
+    } catch {
+    | exn =>
+      raise(
+        switch specificError.contents {
+        | Some(specificError) => specificError
+        | None => exn
+        },
+      )
+    }
+  } catch {
+  | PgEncodingError({table}) =>
+    let escapeTables = switch escapeTables {
+    | Some(set) => set
+    | None => Utils.Set.make()
+    }
+    let _ = escapeTables->Utils.Set.add(table)
+    // Retry with specifying which tables to escape.
+    await writeBatch(
+      sql,
+      ~escapeTables,
+      ~rawEvents,
+      ~batch,
+      ~pgSchema,
+      ~rollbackTargetCheckpointId,
+      ~isInReorgThreshold,
+      ~config,
+      ~setEffectCacheOrThrow,
+      ~updatedEffectsCache,
+      ~allEntities,
+      ~updatedEntities,
+      ~sinkPromise,
+    )
+  }
+}
+
+// Returns the most recent entity state for IDs that need to be restored during rollback.
+// For each ID modified after the rollback target, retrieves its latest state at or before the target.
+let makeGetRollbackRestoredEntitiesQuery = (~entityConfig: Internal.entityConfig, ~pgSchema) => {
+  let dataFieldNames = entityConfig.table.fields->Belt.Array.keepMap(fieldOrDerived =>
+    switch fieldOrDerived {
+    | Field(field) => field->Table.getDbFieldName->Some
+    | DerivedFrom(_) => None
+    }
+  )
+
+  let dataFieldsCommaSeparated =
+    dataFieldNames->Belt.Array.map(name => `"${name}"`)->Js.Array2.joinWith(", ")
+
+  let historyTableName = EntityHistory.historyTableName(
+    ~entityName=entityConfig.name,
+    ~entityIndex=entityConfig.index,
+  )
+
+  `SELECT DISTINCT ON (${Table.idFieldName}) ${dataFieldsCommaSeparated}
+  FROM "${pgSchema}"."${historyTableName}"
+  WHERE "${EntityHistory.checkpointIdFieldName}" <= $1
+  AND EXISTS (
+    SELECT 1
+    FROM "${pgSchema}"."${historyTableName}" h
+    WHERE h.${Table.idFieldName} = "${historyTableName}".${Table.idFieldName}
+    AND h."${EntityHistory.checkpointIdFieldName}" > $1
+  )
+  ORDER BY ${Table.idFieldName}, "${EntityHistory.checkpointIdFieldName}" DESC`
+}
+
+// Returns entity IDs that were created after the rollback target and have no history before it.
+// These entities should be deleted during rollback.
+let makeGetRollbackRemovedIdsQuery = (~entityConfig: Internal.entityConfig, ~pgSchema) => {
+  let historyTableName = EntityHistory.historyTableName(
+    ~entityName=entityConfig.name,
+    ~entityIndex=entityConfig.index,
+  )
+  `SELECT DISTINCT ${Table.idFieldName}
+  FROM "${pgSchema}"."${historyTableName}"
+  WHERE "${EntityHistory.checkpointIdFieldName}" > $1
+  AND NOT EXISTS (
+    SELECT 1
+    FROM "${pgSchema}"."${historyTableName}" h
+    WHERE h.${Table.idFieldName} = "${historyTableName}".${Table.idFieldName}
+    AND h."${EntityHistory.checkpointIdFieldName}" <= $1
+  )`
+}
+
 let make = (
   ~sql: Postgres.sql,
   ~pgHost,
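To make makeInsertDeleteUpdatesQuery concrete: for the same hypothetical User(id, name) entity it generates a statement of roughly the following shape, recording one 'DELETE' history row per (id, checkpoint) pair while leaving all data fields NULL. The change column's name and the checkpoint id's Postgres type are assumptions; the 'DELETE' literal and the UNNEST aliasing follow directly from the code above:

  INSERT INTO "public"."envio_history_User" ("id", "name", "envio_checkpoint_id", "envio_change")
  SELECT u.id, NULL, u.envio_checkpoint_id, 'DELETE'
  FROM UNNEST($1::text[], $2::INTEGER[]) AS u(id, envio_checkpoint_id)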
@@ -514,6 +1047,7 @@ let make = (
   ~pgDatabase,
   ~pgPassword,
   ~isHasuraEnabled,
+  ~sink: option<Sink.t>=?,
   ~onInitialize=?,
   ~onNewTables=?,
 ): Persistence.storage => {
@@ -645,6 +1179,12 @@ let make = (
       )
     }
 
+    // Call sink.initialize before executing PG queries
+    switch sink {
+    | Some(sink) => await sink.initialize(~chainConfigs, ~entities, ~enums)
+    | None => ()
+    }
+
     let queries = makeInitializeTransaction(
       ~pgSchema,
       ~pgUser,
@@ -889,19 +1429,156 @@ let make = (
       }),
       sql
       ->Postgres.unsafe(InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(~pgSchema))
-      ->(Utils.magic: promise<array<unknown>> => promise<array<{"id":
+      ->(Utils.magic: promise<array<unknown>> => promise<array<{"id": float}>>),
       sql
       ->Postgres.unsafe(InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema))
       ->(Utils.magic: promise<array<unknown>> => promise<array<Internal.reorgCheckpoint>>),
     ))
 
+    let checkpointId = (checkpointIdResult->Belt.Array.getUnsafe(0))["id"]
+
+    // Resume sink if present - needed to rollback any reorg changes
+    switch sink {
+    | Some(sink) => await sink.resume(~checkpointId)
+    | None => ()
+    }
+
     {
       cleanRun: false,
       reorgCheckpoints,
       cache,
       chains,
-      checkpointId
+      checkpointId,
+    }
+  }
+
+  let executeUnsafe = query => sql->Postgres.unsafe(query)
+
+  let hasEntityHistoryRows = async () => {
+    // Query for all entity history tables (they have the prefix "envio_history_")
+    let historyTables = await sql->Postgres.unsafe(
+      `SELECT table_name FROM information_schema.tables
+      WHERE table_schema = '${pgSchema}'
+      AND table_name LIKE 'envio_history_%';`,
+    )
+
+    if historyTables->Utils.Array.isEmpty {
+      false
+    } else {
+      // Check if any of these tables have rows
+      let checks =
+        await historyTables
+        ->Belt.Array.map(async (table: {"table_name": string}) => {
+          try {
+            let query = `SELECT EXISTS(SELECT 1 FROM "${pgSchema}"."${table["table_name"]}" LIMIT 1);`
+            let result: array<{"exists": bool}> = (await sql->Postgres.unsafe(query))->Utils.magic
+            switch result {
+            | [row] => row["exists"]
+            | _ => false
+            }
+          } catch {
+          | _ => false
+          }
+        })
+        ->Promise.all
+      checks->Belt.Array.some(v => v)
+    }
+  }
+
+  let setChainMeta = chainsData =>
+    InternalTable.Chains.setMeta(sql, ~pgSchema, ~chainsData)->Promise.thenResolve(_ =>
+      %raw(`undefined`)
+    )
+
+  let pruneStaleCheckpoints = (~safeCheckpointId) =>
+    InternalTable.Checkpoints.pruneStaleCheckpoints(sql, ~pgSchema, ~safeCheckpointId)
+
+  let pruneStaleEntityHistory = (~entityName, ~entityIndex, ~safeCheckpointId) =>
+    EntityHistory.pruneStaleEntityHistory(
+      sql,
+      ~pgSchema,
+      ~entityName,
+      ~entityIndex,
+      ~safeCheckpointId,
+    )
+
+  let getRollbackTargetCheckpoint = (~reorgChainId, ~lastKnownValidBlockNumber) =>
+    InternalTable.Checkpoints.getRollbackTargetCheckpoint(
+      sql,
+      ~pgSchema,
+      ~reorgChainId,
+      ~lastKnownValidBlockNumber,
+    )
+
+  let getRollbackProgressDiff = (~rollbackTargetCheckpointId) =>
+    InternalTable.Checkpoints.getRollbackProgressDiff(sql, ~pgSchema, ~rollbackTargetCheckpointId)
+
+  let getRollbackData = async (
+    ~entityConfig: Internal.entityConfig,
+    ~rollbackTargetCheckpointId,
+  ) => {
+    await Promise.all2((
+      // Get IDs of entities that should be deleted (created after rollback target with no prior history)
+      sql
+      ->Postgres.preparedUnsafe(
+        makeGetRollbackRemovedIdsQuery(~entityConfig, ~pgSchema),
+        [rollbackTargetCheckpointId]->Utils.magic,
+      )
+      ->(Utils.magic: promise<unknown> => promise<array<{"id": string}>>),
+      // Get entities that should be restored to their state at or before rollback target
+      sql
+      ->Postgres.preparedUnsafe(
+        makeGetRollbackRestoredEntitiesQuery(~entityConfig, ~pgSchema),
+        [rollbackTargetCheckpointId]->Utils.magic,
+      )
+      ->(Utils.magic: promise<unknown> => promise<array<unknown>>),
+    ))
+  }
+
+  let writeBatchMethod = async (
+    ~batch,
+    ~rawEvents,
+    ~rollbackTargetCheckpointId,
+    ~isInReorgThreshold,
+    ~config,
+    ~allEntities,
+    ~updatedEffectsCache,
+    ~updatedEntities,
+  ) => {
+    // Initialize sink if configured
+    let sinkPromise = switch sink {
+    | Some(sink) => {
+        let timerRef = Hrtime.makeTimer()
+        Some(
+          sink.writeBatch(~batch, ~updatedEntities)
+          ->Promise.thenResolve(_ => {
+            Prometheus.SinkWrite.increment(
+              ~sinkName=sink.name,
+              ~timeMillis=timerRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis,
+            )
+            None
+          })
+          // Otherwise it fails with unhandled exception
+          ->Promise.catchResolve(exn => Some(exn)),
+        )
+      }
+    | None => None
     }
+
+    await writeBatch(
+      sql,
+      ~batch,
+      ~rawEvents,
+      ~pgSchema,
+      ~rollbackTargetCheckpointId,
+      ~isInReorgThreshold,
+      ~config,
+      ~allEntities,
+      ~setEffectCacheOrThrow,
+      ~updatedEffectsCache,
+      ~updatedEntities,
+      ~sinkPromise,
+    )
   }
 
   {
@@ -913,5 +1590,14 @@ let make = (
     setOrThrow,
     setEffectCacheOrThrow,
     dumpEffectCache,
+    executeUnsafe,
+    hasEntityHistoryRows,
+    setChainMeta,
+    pruneStaleCheckpoints,
+    pruneStaleEntityHistory,
+    getRollbackTargetCheckpoint,
+    getRollbackProgressDiff,
+    getRollbackData,
+    writeBatch: writeBatchMethod,
   }
 }
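As a worked example of the new rollback queries, makeGetRollbackRestoredEntitiesQuery instantiated for the same hypothetical User entity (table and schema names assumed as before) reads: for every id that has history rows after the rollback target ($1), select its newest state at or before the target via DISTINCT ON:

  SELECT DISTINCT ON (id) "id", "name"
  FROM "public"."envio_history_User"
  WHERE "envio_checkpoint_id" <= $1
  AND EXISTS (
    SELECT 1
    FROM "public"."envio_history_User" h
    WHERE h.id = "envio_history_User".id
    AND h."envio_checkpoint_id" > $1
  )
  ORDER BY id, "envio_checkpoint_id" DESC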