envio 3.0.0-alpha.21 → 3.0.0-alpha.22
This diff compares the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/bin.mjs +2 -48
- package/evm.schema.json +67 -0
- package/fuel.schema.json +67 -0
- package/index.d.ts +822 -38
- package/index.js +5 -3
- package/package.json +10 -8
- package/rescript.json +5 -9
- package/src/Address.res +4 -5
- package/src/Address.res.mjs +9 -12
- package/src/Api.res +15 -0
- package/src/Api.res.mjs +20 -0
- package/src/Batch.res +32 -34
- package/src/Batch.res.mjs +172 -187
- package/src/Bin.res +89 -0
- package/src/Bin.res.mjs +97 -0
- package/src/ChainFetcher.res +33 -57
- package/src/ChainFetcher.res.mjs +197 -227
- package/src/ChainManager.res +6 -14
- package/src/ChainManager.res.mjs +74 -85
- package/src/ChainMap.res +14 -16
- package/src/ChainMap.res.mjs +38 -38
- package/src/Config.res +193 -135
- package/src/Config.res.mjs +566 -592
- package/src/Core.res +182 -0
- package/src/Core.res.mjs +207 -0
- package/src/Ecosystem.res +25 -4
- package/src/Ecosystem.res.mjs +12 -13
- package/src/Env.res +20 -13
- package/src/Env.res.mjs +124 -113
- package/src/EnvSafe.res +269 -0
- package/src/EnvSafe.res.mjs +296 -0
- package/src/EnvSafe.resi +18 -0
- package/src/Envio.res +37 -26
- package/src/Envio.res.mjs +59 -60
- package/src/ErrorHandling.res +2 -2
- package/src/ErrorHandling.res.mjs +15 -15
- package/src/EventConfigBuilder.res +219 -81
- package/src/EventConfigBuilder.res.mjs +259 -202
- package/src/EventProcessing.res +27 -38
- package/src/EventProcessing.res.mjs +165 -183
- package/src/EventUtils.res +11 -11
- package/src/EventUtils.res.mjs +21 -22
- package/src/EvmTypes.res +0 -1
- package/src/EvmTypes.res.mjs +5 -5
- package/src/FetchState.res +360 -256
- package/src/FetchState.res.mjs +958 -914
- package/src/GlobalState.res +365 -351
- package/src/GlobalState.res.mjs +958 -992
- package/src/GlobalStateManager.res +1 -2
- package/src/GlobalStateManager.res.mjs +36 -44
- package/src/HandlerLoader.res +107 -23
- package/src/HandlerLoader.res.mjs +128 -38
- package/src/HandlerRegister.res +127 -103
- package/src/HandlerRegister.res.mjs +164 -164
- package/src/HandlerRegister.resi +12 -4
- package/src/Hasura.res +35 -22
- package/src/Hasura.res.mjs +158 -167
- package/src/InMemoryStore.res +20 -27
- package/src/InMemoryStore.res.mjs +64 -80
- package/src/InMemoryTable.res +34 -39
- package/src/InMemoryTable.res.mjs +165 -170
- package/src/Internal.res +52 -33
- package/src/Internal.res.mjs +84 -81
- package/src/LazyLoader.res.mjs +55 -61
- package/src/LoadLayer.res +77 -78
- package/src/LoadLayer.res.mjs +160 -189
- package/src/LoadManager.res +16 -21
- package/src/LoadManager.res.mjs +79 -84
- package/src/LogSelection.res +236 -68
- package/src/LogSelection.res.mjs +211 -141
- package/src/Logging.res +13 -9
- package/src/Logging.res.mjs +130 -143
- package/src/Main.res +428 -51
- package/src/Main.res.mjs +528 -271
- package/src/Persistence.res +77 -84
- package/src/Persistence.res.mjs +131 -132
- package/src/PgStorage.res +291 -167
- package/src/PgStorage.res.mjs +797 -817
- package/src/Prometheus.res +50 -58
- package/src/Prometheus.res.mjs +345 -373
- package/src/ReorgDetection.res +22 -24
- package/src/ReorgDetection.res.mjs +100 -106
- package/src/SafeCheckpointTracking.res +7 -7
- package/src/SafeCheckpointTracking.res.mjs +40 -43
- package/src/SimulateItems.res +41 -49
- package/src/SimulateItems.res.mjs +257 -272
- package/src/Sink.res +2 -2
- package/src/Sink.res.mjs +22 -26
- package/src/TableIndices.res +1 -2
- package/src/TableIndices.res.mjs +42 -48
- package/src/TestIndexer.res +196 -189
- package/src/TestIndexer.res.mjs +536 -536
- package/src/TestIndexerProxyStorage.res +15 -16
- package/src/TestIndexerProxyStorage.res.mjs +98 -122
- package/src/TestIndexerWorker.res +4 -0
- package/src/TestIndexerWorker.res.mjs +7 -0
- package/src/Throttler.res +3 -3
- package/src/Throttler.res.mjs +23 -24
- package/src/Time.res +1 -1
- package/src/Time.res.mjs +18 -21
- package/src/TopicFilter.res +3 -3
- package/src/TopicFilter.res.mjs +29 -30
- package/src/UserContext.res +93 -54
- package/src/UserContext.res.mjs +197 -182
- package/src/Utils.res +141 -86
- package/src/Utils.res.mjs +334 -295
- package/src/bindings/BigDecimal.res +0 -2
- package/src/bindings/BigDecimal.res.mjs +19 -23
- package/src/bindings/ClickHouse.res +28 -27
- package/src/bindings/ClickHouse.res.mjs +243 -240
- package/src/bindings/DateFns.res +11 -11
- package/src/bindings/DateFns.res.mjs +7 -7
- package/src/bindings/EventSource.res.mjs +2 -2
- package/src/bindings/Express.res +2 -5
- package/src/bindings/Hrtime.res +2 -2
- package/src/bindings/Hrtime.res.mjs +30 -32
- package/src/bindings/Lodash.res.mjs +1 -1
- package/src/bindings/NodeJs.res +14 -9
- package/src/bindings/NodeJs.res.mjs +20 -20
- package/src/bindings/Pino.res +8 -10
- package/src/bindings/Pino.res.mjs +40 -43
- package/src/bindings/Postgres.res +2 -5
- package/src/bindings/Postgres.res.mjs +9 -9
- package/src/bindings/PromClient.res +17 -2
- package/src/bindings/PromClient.res.mjs +30 -7
- package/src/bindings/SDSL.res.mjs +2 -2
- package/src/bindings/Viem.res +4 -4
- package/src/bindings/Viem.res.mjs +20 -22
- package/src/bindings/Vitest.res +1 -1
- package/src/bindings/Vitest.res.mjs +2 -2
- package/src/bindings/WebSocket.res +1 -1
- package/src/db/EntityHistory.res +9 -3
- package/src/db/EntityHistory.res.mjs +84 -59
- package/src/db/InternalTable.res +62 -60
- package/src/db/InternalTable.res.mjs +271 -203
- package/src/db/Schema.res +1 -2
- package/src/db/Schema.res.mjs +28 -32
- package/src/db/Table.res +28 -27
- package/src/db/Table.res.mjs +276 -292
- package/src/sources/EventRouter.res +21 -16
- package/src/sources/EventRouter.res.mjs +55 -57
- package/src/sources/Evm.res +17 -1
- package/src/sources/Evm.res.mjs +16 -8
- package/src/sources/EvmChain.res +15 -17
- package/src/sources/EvmChain.res.mjs +40 -42
- package/src/sources/Fuel.res +14 -1
- package/src/sources/Fuel.res.mjs +16 -8
- package/src/sources/FuelSDK.res +1 -1
- package/src/sources/FuelSDK.res.mjs +6 -8
- package/src/sources/HyperFuel.res +8 -10
- package/src/sources/HyperFuel.res.mjs +113 -123
- package/src/sources/HyperFuelClient.res.mjs +6 -7
- package/src/sources/HyperFuelSource.res +19 -20
- package/src/sources/HyperFuelSource.res.mjs +339 -356
- package/src/sources/HyperSync.res +11 -13
- package/src/sources/HyperSync.res.mjs +206 -220
- package/src/sources/HyperSyncClient.res +5 -7
- package/src/sources/HyperSyncClient.res.mjs +70 -75
- package/src/sources/HyperSyncHeightStream.res +8 -9
- package/src/sources/HyperSyncHeightStream.res.mjs +78 -86
- package/src/sources/HyperSyncJsonApi.res +18 -15
- package/src/sources/HyperSyncJsonApi.res.mjs +201 -231
- package/src/sources/HyperSyncSource.res +17 -21
- package/src/sources/HyperSyncSource.res.mjs +268 -290
- package/src/sources/Rpc.res +5 -5
- package/src/sources/Rpc.res.mjs +168 -192
- package/src/sources/RpcSource.res +166 -167
- package/src/sources/RpcSource.res.mjs +972 -1046
- package/src/sources/RpcWebSocketHeightStream.res +10 -11
- package/src/sources/RpcWebSocketHeightStream.res.mjs +131 -145
- package/src/sources/SimulateSource.res +1 -1
- package/src/sources/SimulateSource.res.mjs +35 -38
- package/src/sources/Source.res +1 -1
- package/src/sources/Source.res.mjs +3 -3
- package/src/sources/SourceManager.res +39 -20
- package/src/sources/SourceManager.res.mjs +340 -371
- package/src/sources/SourceManager.resi +2 -1
- package/src/sources/Svm.res +12 -5
- package/src/sources/Svm.res.mjs +44 -41
- package/src/tui/Tui.res +23 -12
- package/src/tui/Tui.res.mjs +292 -290
- package/src/tui/bindings/Ink.res +2 -4
- package/src/tui/bindings/Ink.res.mjs +35 -41
- package/src/tui/components/BufferedProgressBar.res +7 -7
- package/src/tui/components/BufferedProgressBar.res.mjs +46 -46
- package/src/tui/components/CustomHooks.res +1 -2
- package/src/tui/components/CustomHooks.res.mjs +102 -122
- package/src/tui/components/Messages.res +1 -2
- package/src/tui/components/Messages.res.mjs +38 -42
- package/src/tui/components/SyncETA.res +10 -11
- package/src/tui/components/SyncETA.res.mjs +178 -196
- package/src/tui/components/TuiData.res +1 -1
- package/src/tui/components/TuiData.res.mjs +7 -6
- package/src/vendored/Rest.res +52 -66
- package/src/vendored/Rest.res.mjs +324 -364
- package/svm.schema.json +67 -0
- package/src/Address.gen.ts +0 -8
- package/src/Config.gen.ts +0 -19
- package/src/Envio.gen.ts +0 -55
- package/src/EvmTypes.gen.ts +0 -6
- package/src/InMemoryStore.gen.ts +0 -6
- package/src/Internal.gen.ts +0 -64
- package/src/PgStorage.gen.ts +0 -10
- package/src/PgStorage.res.d.mts +0 -5
- package/src/Types.ts +0 -56
- package/src/bindings/BigDecimal.gen.ts +0 -14
- package/src/bindings/BigDecimal.res.d.mts +0 -5
- package/src/bindings/BigInt.gen.ts +0 -10
- package/src/bindings/BigInt.res +0 -70
- package/src/bindings/BigInt.res.d.mts +0 -5
- package/src/bindings/BigInt.res.mjs +0 -154
- package/src/bindings/Ethers.res.d.mts +0 -5
- package/src/bindings/Pino.gen.ts +0 -17
- package/src/bindings/Postgres.gen.ts +0 -8
- package/src/bindings/Postgres.res.d.mts +0 -5
- package/src/bindings/Promise.res +0 -67
- package/src/bindings/Promise.res.mjs +0 -26
- package/src/db/InternalTable.gen.ts +0 -36
- package/src/sources/HyperSyncClient.gen.ts +0 -19
package/src/PgStorage.res CHANGED

```diff
@@ -1,8 +1,5 @@
 let getCacheRowCountFnName = "get_cache_row_count"
 
-// Only needed for some old tests
-// Remove @genType in the future
-@genType
 let makeClient = () => {
   Postgres.makeSql(
     ~config={
@@ -26,8 +23,10 @@ let makeClient = () => {
 }
 
 let makeCreateIndexQuery = (~tableName, ~indexFields, ~pgSchema) => {
-  let indexName = tableName ++ "_" ++ indexFields->
-
+  let indexName = tableName ++ "_" ++ indexFields->Array.joinUnsafe("_")
+
+  // Case for indexer before envio@2.28
+  let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Array.joinUnsafe(", ")
   `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
 }
 
@@ -52,17 +51,16 @@ let makeCreateCompositeIndexQuery = (
     tableName ++
     "_" ++
     indexFields
-    ->
-    ->
+    ->Array.map(f => f.fieldName ++ directionToIndexName(f.direction))
+    ->Array.joinUnsafe("_")
   let index =
     indexFields
     ->Belt.Array.map(f => `"${f.fieldName}"${directionToSql(f.direction)}`)
-    ->
+    ->Array.joinUnsafe(", ")
   `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
 }
 
 let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
-  open Belt
   let tableName = table.tableName
   let createIndex = indexField =>
     makeCreateIndexQuery(~tableName, ~indexFields=[indexField], ~pgSchema)
@@ -73,12 +71,11 @@ let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
   let singleIndices = table->Table.getSingleIndices
   let compositeIndices = table->Table.getCompositeIndices
 
-  singleIndices->Array.map(createIndex)->
-  compositeIndices->Array.map(createCompositeIndex)->
+  singleIndices->Array.map(createIndex)->Array.joinUnsafe("\n") ++
+  compositeIndices->Array.map(createCompositeIndex)->Array.joinUnsafe("\n")
 }
 
 let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText) => {
-  open Belt
   let fieldsMapped =
     table
     ->Table.getFields
@@ -99,13 +96,10 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText
       }}`
       }
     })
-    ->
+    ->Array.joinUnsafe(", ")
 
   let primaryKeyFieldNames = table->Table.getPrimaryKeyFieldNames
-  let primaryKey =
-    primaryKeyFieldNames
-    ->Array.map(field => `"${field}"`)
-    ->Js.Array2.joinWith(", ")
+  let primaryKey = primaryKeyFieldNames->Array.map(field => `"${field}"`)->Array.joinUnsafe(", ")
 
   `CREATE TABLE IF NOT EXISTS "${pgSchema}"."${table.tableName}"(${fieldsMapped}${primaryKeyFieldNames->Array.length > 0
     ? `, PRIMARY KEY(${primaryKey})`
@@ -198,10 +192,10 @@ let makeInitializeTransaction = (
 
   let allTables = generalTables->Array.copy
   let allEntityTables = []
-  entities->
-  allEntityTables->
-  allTables->
-  allTables->
+  entities->Array.forEach((entityConfig: Internal.entityConfig) => {
+    allEntityTables->Array.push(entityConfig.table)->ignore
+    allTables->Array.push(entityConfig.table)->ignore
+    allTables->Array.push(getEntityHistory(~entityConfig).table)->ignore
   })
   let derivedSchema = Schema.make(allEntityTables)
 
@@ -221,17 +215,16 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   )
 
   // Optimized enum creation - direct when cleanRun, conditional otherwise
-  enums->
-  // Create base enum creation query once
+  enums->Array.forEach((enumConfig: Table.enumConfig<Table.enum>) => {
     let enumCreateQuery = `CREATE TYPE "${pgSchema}".${enumConfig.name} AS ENUM(${enumConfig.variants
-    ->
-    ->
+    ->Array.map(v => `'${v->(Utils.magic: Table.enum => string)}'`)
+    ->Array.joinUnsafe(", ")});`
 
     query := query.contents ++ "\n" ++ enumCreateQuery
   })
 
   // Batch all table creation first (optimal for PostgreSQL)
-  allTables->
+  allTables->Array.forEach((table: Table.table) => {
     query :=
       query.contents ++
       "\n" ++
@@ -239,7 +232,7 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   })
 
   // Then batch all indices (better performance when tables exist)
-  allTables->
+  allTables->Array.forEach((table: Table.table) => {
     let indices = makeCreateTableIndicesQuery(table, ~pgSchema)
     if indices !== "" {
       query := query.contents ++ "\n" ++ indices
@@ -247,10 +240,10 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   })
 
   // Add derived indices
-  entities->
+  entities->Array.forEach((entity: Internal.entityConfig) => {
     entity.table
     ->Table.getDerivedFromFields
-    ->
+    ->Array.forEach(derivedFromField => {
       let indexField =
         derivedSchema->Schema.getDerivedFromFieldName(derivedFromField)->Utils.unwrapResultExn
       query :=
```
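The hunks above mostly swap `Js.Array2.joinWith` for `Array.joinUnsafe` in the SQL builders. For orientation, here is a rough TypeScript mirror of the SQL that `makeCreateIndexQuery` emits; the function shape and sample values are illustrative, only the SQL template comes from the diff:

```ts
// Illustrative mirror of makeCreateIndexQuery's output (sample names invented).
function createIndexSql(tableName: string, indexFields: string[], pgSchema: string): string {
  const indexName = `${tableName}_${indexFields.join("_")}`;
  const columns = indexFields.map(f => `"${f}"`).join(", ");
  return `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${columns});`;
}

// createIndexSql("pool", ["token0"], "public")
// => CREATE INDEX IF NOT EXISTS "pool_token0" ON "public"."pool"("token0");
```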
```diff
@@ -318,26 +311,26 @@ let makeInsertUnnestSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~is
 
   let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
 
-  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->
+  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Array.joinUnsafe(", ")})
 SELECT * FROM unnest(${arrayFieldTypes
-  ->
-  `$${(idx + 1)->
+  ->Array.mapWithIndex((arrayFieldType, idx) => {
+    `$${(idx + 1)->Int.toString}::${arrayFieldType}`
   })
-  ->
+  ->Array.joinUnsafe(",")})` ++
   switch (isRawEvents, primaryKeyFieldNames) {
   | (true, _)
   | (_, []) => ``
   | (false, primaryKeyFieldNames) =>
     `ON CONFLICT(${primaryKeyFieldNames
-    ->
-    ->
+    ->Array.map(fieldName => `"${fieldName}"`)
+    ->Array.joinUnsafe(",")}) DO ` ++ (
       quotedNonPrimaryFieldNames->Utils.Array.isEmpty
         ? `NOTHING`
         : `UPDATE SET ${quotedNonPrimaryFieldNames
-          ->
+          ->Array.map(fieldName => {
             `${fieldName} = EXCLUDED.${fieldName}`
           })
-          ->
+          ->Array.joinUnsafe(",")}`
     )
   } ++ ";"
 }
@@ -360,26 +353,26 @@ let makeInsertValuesSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~it
       if fieldIdx > 0 {
         placeholders := placeholders.contents ++ ","
       }
-      placeholders := placeholders.contents ++ `$${(fieldIdx * itemsCount + idx)->
+      placeholders := placeholders.contents ++ `$${(fieldIdx * itemsCount + idx)->Int.toString}`
     }
     placeholders := placeholders.contents ++ ")"
   }
 
-  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->
+  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Array.joinUnsafe(", ")})
 VALUES${placeholders.contents}` ++
   switch primaryKeyFieldNames {
   | [] => ``
   | primaryKeyFieldNames =>
     `ON CONFLICT(${primaryKeyFieldNames
-    ->
-    ->
+    ->Array.map(fieldName => `"${fieldName}"`)
+    ->Array.joinUnsafe(",")}) DO ` ++ (
       quotedNonPrimaryFieldNames->Utils.Array.isEmpty
         ? `NOTHING`
         : `UPDATE SET ${quotedNonPrimaryFieldNames
-          ->
+          ->Array.map(fieldName => {
             `${fieldName} = EXCLUDED.${fieldName}`
           })
-          ->
+          ->Array.joinUnsafe(",")}`
     )
   } ++ ";"
 }
```
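Two batch-insert shapes are generated above: an `unnest`-based variant with one array parameter per column, and a plain `VALUES` variant whose placeholders are numbered column-major (`fieldIdx * itemsCount + idx`, per the visible template). A hedged sketch of the emitted SQL for a two-row, two-column batch; the table and column names are invented:

```ts
// Assumed shape of the generated queries for a hypothetical "pool"(id, token0) table.

// unnest variant: params are per-column arrays, e.g. [["a","b"], ["x","y"]].
const unnestInsert = `INSERT INTO "public"."pool" ("id", "token0")
SELECT * FROM unnest($1::text[],$2::text[])
ON CONFLICT("id") DO UPDATE SET "token0" = EXCLUDED."token0";`;

// VALUES variant: placeholders numbered column-major, so with 2 items the first
// column takes $1/$2 and the second $3/$4 (assuming the row index is 1-based).
const valuesInsert = `INSERT INTO "public"."pool" ("id", "token0")
VALUES($1,$3),($2,$4)
ON CONFLICT("id") DO UPDATE SET "token0" = EXCLUDED."token0";`;
```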
```diff
@@ -401,7 +394,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
   // Currently history update table uses S.object with transformation for schema,
   // which is being lossed during conversion to dbSchema.
   // So use simple insert values for now.
-  let isHistoryUpdate = table.tableName->
+  let isHistoryUpdate = table.tableName->String.startsWith(EntityHistory.historyTablePrefix)
 
   // Should experiment how much it'll affect performance
   // Although, it should be fine not to perform the validation check,
@@ -450,27 +443,21 @@ let chunkArray = (arr: array<'a>, ~chunkSize) => {
   let chunks = []
   let i = ref(0)
   while i.contents < arr->Array.length {
-    let chunk = arr->
-    chunks->
+    let chunk = arr->Array.slice(~start=i.contents, ~end=i.contents + chunkSize)
+    chunks->Array.push(chunk)->ignore
     i := i.contents + chunkSize
   }
   chunks
 }
 
 let removeInvalidUtf8InPlace = entities =>
-  entities->
+  entities->Array.forEach(item => {
     let dict = item->(Utils.magic: 'a => dict<unknown>)
     dict->Utils.Dict.forEachWithKey((value, key) => {
-      if value->
+      if value->typeof === #string {
         let value = value->(Utils.magic: unknown => string)
-
-
-        //
-        // This is unsafe, but we rely that it'll use
-        // the mutated reference on retry.
-        // TODO: Test it properly after we start using
-        // real pg for indexer test framework.
-        dict->Js.Dict.set(
+
+        dict->Dict.set(
           key,
           value
           ->Utils.String.replaceAll("\x00", "")
```
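The two helpers above are small enough to restate. A TypeScript sketch of the same logic, assuming the motivation is that PostgreSQL `text` columns reject NUL (`\0`) bytes; the in-place mutation matters because, per the removed comment, a retried write reuses the mutated reference:

```ts
// Sketch of chunkArray: split a batch into fixed-size chunks (last may be partial).
function chunkArray<T>(arr: T[], chunkSize: number): T[][] {
  const chunks: T[][] = [];
  for (let i = 0; i < arr.length; i += chunkSize) {
    chunks.push(arr.slice(i, i + chunkSize));
  }
  return chunks;
}

// Sketch of removeInvalidUtf8InPlace: strip NUL bytes from every string field,
// mutating the entities so a retried write sees the sanitized values.
function removeNulBytesInPlace(entities: Record<string, unknown>[]): void {
  for (const entity of entities) {
    for (const [key, value] of Object.entries(entity)) {
      if (typeof value === "string") {
        entity[key] = value.replaceAll("\u0000", "");
      }
    }
  }
}
```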
```diff
@@ -508,13 +495,11 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
   if data["isInsertValues"] {
     let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
     let responses = []
-    chunks->
+    chunks->Array.forEach(chunk => {
       let chunkSize = chunk->Array.length
       let isFullChunk = chunkSize === maxItemsPerQuery
 
-      let params = data["convertOrThrow"](
-        chunk->(Utils.magic: array<'item> => array<unknown>),
-      )
+      let params = data["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>))
       // Use prepared query only for full batches where the cached query is reused.
       // Partial chunks generate unique SQL each time, so preparation has no benefit.
       let response = isFullChunk
@@ -523,7 +508,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
             makeInsertValuesSetQuery(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
             params,
           )
-      responses->
+      responses->Array.push(response)->ignore
     })
     let _ = await Promise.all(responses)
   } else {
@@ -535,14 +520,14 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
   }
 } catch {
 | S.Raised(_) as exn =>
-
+  throw(
     Persistence.StorageError({
       message: `Failed to convert items for table "${table.tableName}"`,
       reason: exn,
     }),
   )
 | exn =>
-
+  throw(
     Persistence.StorageError({
       message: `Failed to insert items into table "${table.tableName}"`,
       reason: exn->Utils.prettifyExn,
```
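The comment in the first hunk carries the key reasoning: full chunks all share the same SQL text, so the driver can cache and reuse a prepared statement, while the trailing partial chunk has a unique placeholder count and gains nothing from preparation. A sketch using postgres.js-style calls as a stand-in for the package's ReScript Postgres bindings (the exact binding API is not shown in this diff):

```ts
import postgres from "postgres";

const sql = postgres(); // connection options elided; read from env in practice

// Sketch of the chunked-write strategy in setOrThrow above.
async function writeChunks(
  makeQuery: (itemsCount: number) => string, // stand-in for makeInsertValuesSetQuery
  chunks: unknown[][],
  maxItemsPerQuery: number,
) {
  const responses = chunks.map(chunk => {
    // Prepare only when the chunk is full, i.e. the SQL text will repeat.
    const isFullChunk = chunk.length === maxItemsPerQuery;
    return sql.unsafe(makeQuery(chunk.length), chunk as any[], { prepare: isFullChunk });
  });
  await Promise.all(responses);
}
```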
```diff
@@ -615,7 +600,7 @@ let getConnectedPsqlExec = {
         | Null =>
           resolve(
             Ok(
-              `${binary} -h ${pgHost} -p ${pgDockerServicePort->
+              `${binary} -h ${pgHost} -p ${pgDockerServicePort->Int.toString} -U ${pgUser} -d ${pgDatabase}`,
             ),
           )
         }
@@ -625,7 +610,7 @@ let getConnectedPsqlExec = {
         | Null =>
           resolve(
             Ok(
-              `${binary} -h ${pgHost} -p ${pgPort->
+              `${binary} -h ${pgHost} -p ${pgPort->Int.toString} -U ${pgUser} -d ${pgDatabase}`,
             ),
           )
         }
@@ -659,7 +644,7 @@ let deleteByIdsOrThrow = async (sql, ~pgSchema, ~ids, ~table: Table.table) => {
     }
   ) {
   | exception exn =>
-
+    throw(
       Persistence.StorageError({
         message: `Failed deleting "${table.tableName}" from storage by ids`,
         reason: exn,
@@ -682,11 +667,11 @@ let makeInsertDeleteUpdatesQuery = (~entityConfig: Internal.entityConfig, ~pgSch
     | DerivedFrom(_) => None
     }
   )
-  allHistoryFieldNames->
-  allHistoryFieldNames->
+  allHistoryFieldNames->Array.push(EntityHistory.checkpointIdFieldName)->ignore
+  allHistoryFieldNames->Array.push(EntityHistory.changeFieldName)->ignore
 
   let allHistoryFieldNamesStr =
-    allHistoryFieldNames->Belt.Array.map(name => `"${name}"`)->
+    allHistoryFieldNames->Belt.Array.map(name => `"${name}"`)->Array.joinUnsafe(", ")
 
   // Build the SELECT part: id from unnest, envio_checkpoint_id from unnest, 'DELETE' for action, NULL for all other fields
   let selectParts = allHistoryFieldNames->Belt.Array.map(fieldName => {
@@ -699,7 +684,7 @@ let makeInsertDeleteUpdatesQuery = (~entityConfig: Internal.entityConfig, ~pgSch
     | _ => "NULL"
     }
   })
-  let selectPartsStr = selectParts->
+  let selectPartsStr = selectParts->Array.joinUnsafe(", ")
 
   // Get the PostgreSQL type for the checkpoint ID field
   let checkpointIdPgType = Table.getPgFieldType(
```
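Per the builder's own comment, `makeInsertDeleteUpdatesQuery` selects `id` and `envio_checkpoint_id` from unnest'ed parameter arrays, the literal `'DELETE'` for the change column, and `NULL` for every other history field. A guess at the emitted shape, with an invented entity, one invented data column, and an assumed name for the change column:

```ts
// Assumed shape only: real field names come from the entity's history table schema.
const insertDeleteUpdates = `
INSERT INTO "public"."Pool_history" ("id", "value", "envio_checkpoint_id", "action")
SELECT id, NULL, envio_checkpoint_id, 'DELETE'
FROM unnest($1::text[], $2::bigint[]) AS t(id, envio_checkpoint_id);`;
```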
```diff
@@ -764,7 +749,7 @@ let rec writeBatch = async (
   let entitiesToSet = []
   let idsToDelete = []
 
-  updates->
+  updates->Array.forEach(row => {
     switch row {
     | {latestChange: Set({entity})} => entitiesToSet->Belt.Array.push(entity)
     | {latestChange: Delete({entityId})} => idsToDelete->Belt.Array.push(entityId)
@@ -787,15 +772,12 @@ let rec writeBatch = async (
   let batchDeleteCheckpointIds = []
   let batchDeleteEntityIds = []
 
-  updates->
+  updates->Array.forEach(update => {
     switch update {
     | {history, containsRollbackDiffChange} =>
-      history->
+      history->Array.forEach(
         (change: Change.t<'a>) => {
           if !containsRollbackDiffChange {
-            // For every update we want to make sure that there's an existing history item
-            // with the current entity state. So we backfill history with checkpoint id 0,
-            // before writing updates. Don't do this if the update has a rollback diff change.
             backfillHistoryIds->Utils.Set.add(change->Change.getEntityId)->ignore
           }
           switch change {
@@ -805,7 +787,7 @@ let rec writeBatch = async (
             ->Belt.Array.push(change->Change.getCheckpointId)
             ->ignore
           }
-          | Set(_) => batchSetUpdates->
+          | Set(_) => batchSetUpdates->Array.push(change)->ignore
           }
         },
       )
@@ -828,18 +810,21 @@ let rec writeBatch = async (
       sql
       ->Postgres.preparedUnsafe(
         makeInsertDeleteUpdatesQuery(~entityConfig, ~pgSchema),
-        (
+        (
+          batchDeleteEntityIds,
+          batchDeleteCheckpointIds->Utils.BigInt.arrayToStringArray,
+        )->Obj.magic,
       )
-      ->Promise.ignoreValue,
+      ->Utils.Promise.ignoreValue,
     )
   }
 
   if batchSetUpdates->Utils.Array.notEmpty {
     if shouldRemoveInvalidUtf8 {
-      let entities = batchSetUpdates->
+      let entities = batchSetUpdates->Array.map(batchSetUpdate => {
         switch batchSetUpdate {
         | Set({entity}) => entity
-        | _ =>
+        | _ => JsError.throwWithMessage("Expected Set action")
         }
       })
       entities->removeInvalidUtf8InPlace
@@ -848,7 +833,7 @@ let rec writeBatch = async (
     let entityHistory = getEntityHistory(~entityConfig)
 
     promises
-    ->
+    ->Array.push(
       sql->setOrThrow(
         ~items=batchSetUpdates,
         ~itemSchema=entityHistory.setChangeSchema,
@@ -888,13 +873,13 @@ let rec writeBatch = async (
   | exn => {
     /* Note: Entity History doesn't return StorageError yet, and directly throws JsError */
     let normalizedExn = switch exn {
-
+    | JsExn(_) => exn
     | Persistence.StorageError({reason: exn}) => exn
     | _ => exn
-    }->
+    }->JsExn.anyToExnInternal
 
     switch normalizedExn {
-
+    | JsExn(error) =>
       // Workaround for https://github.com/enviodev/hyperindex/issues/446
       // We do escaping only when we actually got an error writing for the first time.
       // This is not perfect, but an optimization to avoid escaping for every single item.
@@ -912,7 +897,7 @@ let rec writeBatch = async (
       | _ => specificError.contents = Some(exn->Utils.prettifyExn)
       | exception _ => ()
       }
-    | S.Raised(_) =>
+    | S.Raised(_) => throw(normalizedExn) // But rethrow this one, since it's not a PG error
     | _ => ()
     }
 
@@ -932,7 +917,7 @@ let rec writeBatch = async (
   | Some(rollbackTargetCheckpointId) =>
     Some(
       sql => {
-        let promises = allEntities->
+        let promises = allEntities->Array.map(entityConfig => {
          sql->EntityHistory.rollback(
            ~pgSchema,
            ~entityName=entityConfig.name,
@@ -941,7 +926,7 @@ let rec writeBatch = async (
          )
        })
        promises
-        ->
+        ->Array.push(
          sql->InternalTable.Checkpoints.rollback(~pgSchema, ~rollbackTargetCheckpointId),
        )
        ->ignore
@@ -993,12 +978,12 @@ let rec writeBatch = async (
   await setOperations
   ->Belt.Array.map(dbFunc => sql->dbFunc)
   ->Promise.all
-  ->Promise.ignoreValue
+  ->Utils.Promise.ignoreValue
 
   switch sinkPromise {
   | Some(sinkPromise) =>
     switch await sinkPromise {
-    | Some(exn) =>
+    | Some(exn) => throw(exn)
     | None => ()
     }
   | None => ()
@@ -1015,12 +1000,12 @@ let rec writeBatch = async (
 
   // Just in case, if there's a not PG-specific error.
   switch specificError.contents {
-  | Some(specificError) =>
+  | Some(specificError) => throw(specificError)
   | None => ()
   }
 } catch {
 | exn =>
-
+  throw(
     switch specificError.contents {
     | Some(specificError) => specificError
     | None => exn
@@ -1064,7 +1049,7 @@ let makeGetRollbackRestoredEntitiesQuery = (~entityConfig: Internal.entityConfig
   )
 
   let dataFieldsCommaSeparated =
-    dataFieldNames->Belt.Array.map(name => `"${name}"`)->
+    dataFieldNames->Belt.Array.map(name => `"${name}"`)->Array.joinUnsafe(", ")
 
   let historyTableName = EntityHistory.historyTableName(
     ~entityName=entityConfig.name,
@@ -1117,7 +1102,7 @@ let make = (
   // Must match PG_CONTAINER in packages/cli/src/docker_env.rs
   let containerName = "envio-postgres"
   let psqlExecOptions: NodeJs.ChildProcess.execOptions = {
-    env:
+    env: Dict.fromArray([("PGPASSWORD", pgPassword), ("PATH", %raw(`process.env.PATH`))]),
   }
 
   let cacheDirPath = NodeJs.Path.resolve([
@@ -1146,40 +1131,39 @@ let make = (
       getConnectedPsqlExec(~pgUser, ~pgHost, ~pgDatabase, ~pgPort, ~containerName),
     )) {
     | (Ok(entries), Ok(psqlExec)) => {
-        let cacheFiles = entries->
-          entry->
+        let cacheFiles = entries->Array.filter(entry => {
+          entry->String.endsWith(".tsv")
         })
 
-        let _ =
-
-        ->
-
-
-
-
-
-
-
-
-
-
-
-        (
-
-
-
-
-
-
-
-
-
-
-        )
-        })
+        let _ = await cacheFiles
+        ->Array.map(entry => {
+          let effectName = entry->String.slice(~start=0, ~end=-4)
+          let table = Internal.makeCacheTable(~effectName)
+
+          sql
+          ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false))
+          ->Promise.then(() => {
+            let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
+
+            let command = `${psqlExec} -c 'COPY "${pgSchema}"."${table.tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
+
+            Promise.make(
+              (resolve, reject) => {
+                NodeJs.ChildProcess.execWithOptions(
+                  command,
+                  psqlExecOptions,
+                  (~error, ~stdout, ~stderr as _) => {
+                    switch error {
+                    | Value(error) => reject(error)
+                    | Null => resolve(stdout)
+                    }
+                  },
+                )
+              },
+            )
           })
-
+        })
+        ->Promise.all
 
         Logging.info("Successfully uploaded cache.")
       }
```
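The rewritten restore path above recreates each cache table and then shells out to `psql`, streaming a `.tsv` dump into `COPY ... FROM STDIN`. A Node sketch of that shell-out, with the child-process env narrowed to `PGPASSWORD` and `PATH` as in `psqlExecOptions`; the function and parameter names are invented:

```ts
import { exec } from "node:child_process";

// Sketch of the per-table cache restore (psqlExec/pgSchema/paths supplied by caller).
function restoreCacheTable(psqlExec: string, pgSchema: string, tableName: string, inputFile: string) {
  const command = `${psqlExec} -c 'COPY "${pgSchema}"."${tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`;
  return new Promise<string>((resolve, reject) => {
    exec(
      command,
      { env: { PGPASSWORD: process.env.PGPASSWORD, PATH: process.env.PATH } },
      (error, stdout) => (error ? reject(error) : resolve(stdout)),
    );
  });
}
```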
```diff
@@ -1193,15 +1177,16 @@ let make = (
       }
     }
 
-    let cacheTableInfo: array<schemaCacheTableInfo> =
-
+    let cacheTableInfo: array<schemaCacheTableInfo> = await sql->Postgres.unsafe(
+      makeSchemaCacheTableInfoQuery(~pgSchema),
+    )
 
     if withUpload && cacheTableInfo->Utils.Array.notEmpty {
       // Integration with other tools like Hasura
       switch onNewTables {
       | Some(onNewTables) =>
         await onNewTables(
-          ~tableNames=cacheTableInfo->
+          ~tableNames=cacheTableInfo->Array.map(info => {
             info.tableName
           }),
         )
@@ -1209,17 +1194,18 @@ let make = (
       }
     }
 
-    let cache =
-      cacheTableInfo->
-        let effectName = tableName->
-        cache->
+    let cache = Dict.make()
+    cacheTableInfo->Array.forEach(({tableName, count}) => {
+      let effectName = tableName->String.slice(~start=cacheTablePrefixLength)
+      cache->Dict.set(effectName, ({effectName, count}: Persistence.effectCacheRecord))
    })
    cache
  }
 
  let initialize = async (~chainConfigs=[], ~entities=[], ~enums=[]): Persistence.initialState => {
-    let schemaTableNames: array<schemaTableName> =
-
+    let schemaTableNames: array<schemaTableName> = await sql->Postgres.unsafe(
+      makeSchemaTableNamesQuery(~pgSchema),
+    )
 
    // The initialization query will completely drop the schema and recreate it from scratch.
    // So we need to check if the schema is not used for anything else than envio.
@@ -1231,14 +1217,13 @@ let make = (
       // Otherwise should throw if there's a table, but no envio specific one
       // This means that the schema is used for something else than envio.
       !(
-        schemaTableNames->
+        schemaTableNames->Array.some(table =>
           table.tableName === InternalTable.Chains.table.tableName ||
-          // Case for indexer before envio@2.28
           table.tableName === "event_sync_state"
         )
       )
     ) {
-
+      JsError.throwWithMessage(
         `Cannot run Envio migrations on PostgreSQL schema "${pgSchema}" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: "pnpm envio local db-migrate down"\n2. Or specify a different schema name by setting the "ENVIO_PG_SCHEMA" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.`,
       )
     }
@@ -1262,9 +1247,32 @@ let make = (
     let _ = await sql->Postgres.beginSql(sql => {
       // Promise.all might be not safe to use here,
       // but it's just how it worked before.
-      Promise.all(queries->
+      Promise.all(queries->Array.map(query => sql->Postgres.unsafe(query)))
     })
 
+    // Populate config addresses into envio_addresses with registration_block/log = -1
+    let ids = []
+    let addrChainIds = []
+    let addrContractNames = []
+    chainConfigs->Array.forEach(chain => {
+      chain.contracts->Array.forEach(contract => {
+        contract.addresses->Array.forEach(
+          address => {
+            ids->Array.push(Config.EnvioAddresses.makeId(~chainId=chain.id, ~address))->ignore
+            addrChainIds->Array.push(chain.id)->ignore
+            addrContractNames->Array.push(contract.name)->ignore
+          },
+        )
+      })
+    })
+    if ids->Array.length > 0 {
+      await sql->Postgres.unpreparedUnsafe(
+        `INSERT INTO "${pgSchema}"."${Config.EnvioAddresses.table.tableName}" ("id", "chain_id", "registration_block", "registration_log_index", "contract_name")
+SELECT id, chain_id, -1, -1, contract_name FROM unnest($1::text[],$2::int[],$3::text[]) AS t(id, chain_id, contract_name);`,
+        (ids, addrChainIds, addrContractNames)->(Utils.magic: _ => unknown),
+      )
+    }
+
     let cache = await restoreEffectCache(~withUpload=true)
 
     // Integration with other tools like Hasura
```
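The new address-population step builds three index-aligned arrays (ids, chain ids, contract names) and lets `unnest` zip them back into rows server-side, keeping the whole thing a single round trip regardless of address count. A sketch of the client-side accumulation; the types and the `makeId` format are assumptions:

```ts
type ChainConfig = { id: number; contracts: { name: string; addresses: string[] }[] };
const chainConfigs: ChainConfig[] = []; // populated from config.yaml in the real code

// Index-aligned parallel arrays, as consumed by unnest($1::text[],$2::int[],$3::text[]).
const ids: string[] = [];
const chainIds: number[] = [];
const contractNames: string[] = [];

for (const chain of chainConfigs) {
  for (const contract of chain.contracts) {
    for (const address of contract.addresses) {
      ids.push(`${chain.id}-${address}`); // Config.EnvioAddresses.makeId format assumed
      chainIds.push(chain.id);
      contractNames.push(contract.name);
    }
  }
}
```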
```diff
@@ -1277,7 +1285,7 @@ let make = (
       cleanRun: true,
       cache,
       reorgCheckpoints: [],
-      chains: chainConfigs->
+      chains: chainConfigs->Array.map((chainConfig): Persistence.initialChainState => {
         id: chainConfig.id,
         startBlock: chainConfig.startBlock,
         endBlock: chainConfig.endBlock,
@@ -1286,7 +1294,7 @@ let make = (
         numEventsProcessed: 0.,
         firstEventBlockNumber: None,
         timestampCaughtUpToHeadOrEndblock: None,
-
+        indexingAddresses: ChainFetcher.configAddresses(chainConfig),
         sourceBlockNumber: 0,
       }),
       checkpointId: InternalTable.Checkpoints.initialCheckpointId,
@@ -1309,7 +1317,7 @@ let make = (
       }
     ) {
     | exception exn =>
-
+      throw(
         Persistence.StorageError({
           message: `Failed loading "${table.tableName}" from storage by ids`,
           reason: exn,
@@ -1318,7 +1326,7 @@ let make = (
     | rows =>
       try rows->S.parseOrThrow(rowsSchema) catch {
       | exn =>
-
+        throw(
           Persistence.StorageError({
             message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
             reason: exn,
@@ -1338,7 +1346,7 @@ let make = (
   ) => {
     let params = try [fieldValue->S.reverseConvertToJsonOrThrow(fieldSchema)]->Obj.magic catch {
     | exn =>
-
+      throw(
        Persistence.StorageError({
          message: `Failed loading "${table.tableName}" from storage by field "${fieldName}". Couldn't serialize provided value.`,
          reason: exn,
@@ -1355,7 +1363,7 @@ let make = (
       params,
     ) {
     | exception exn =>
-
+      throw(
        Persistence.StorageError({
          message: `Failed loading "${table.tableName}" from storage by field "${fieldName}"`,
          reason: exn,
@@ -1364,7 +1372,7 @@ let make = (
     | rows =>
       try rows->S.parseOrThrow(rowsSchema) catch {
       | exn =>
-
+        throw(
          Persistence.StorageError({
            message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
            reason: exn,
@@ -1397,10 +1405,9 @@ let make = (
     let {table, itemSchema} = effect.storageMeta
 
     if initialize {
-      let _ =
-
-
-      )
+      let _ = await sql->Postgres.unsafe(
+        makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false),
+      )
       // Integration with other tools like Hasura
       switch onNewTables {
       | Some(onNewTables) => await onNewTables(~tableNames=[table.tableName])
@@ -1414,9 +1421,9 @@ let make = (
   let dumpEffectCache = async () => {
     try {
       let cacheTableInfo: array<schemaCacheTableInfo> =
-        (await sql
-
-
+        (await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema)))->Array.filter(i =>
+          i.count > 0
+        )
 
       if cacheTableInfo->Utils.Array.notEmpty {
         // Create .envio/cache directory if it doesn't exist
@@ -1435,14 +1442,14 @@ let make = (
       | Ok(psqlExec) => {
          Logging.info(
            `Dumping cache: ${cacheTableInfo
-            ->
+            ->Array.map(({tableName, count}) =>
              tableName ++ " (" ++ count->Belt.Int.toString ++ " rows)"
            )
-            ->
+            ->Array.joinUnsafe(", ")}`,
          )
 
-          let promises = cacheTableInfo->
-            let cacheName = tableName->
+          let promises = cacheTableInfo->Array.map(async ({tableName}) => {
+            let cacheName = tableName->String.slice(~start=cacheTablePrefixLength)
            let outputFile =
              NodeJs.Path.join(cacheDirPath, cacheName ++ ".tsv")->NodeJs.Path.toString
 
@@ -1483,13 +1490,13 @@ let make = (
     rawInitialStates->Belt.Array.map((rawInitialState): Persistence.initialChainState => {
       id: rawInitialState.id,
       startBlock: rawInitialState.startBlock,
-      endBlock: rawInitialState.endBlock->
+      endBlock: rawInitialState.endBlock->Null.toOption,
       maxReorgDepth: rawInitialState.maxReorgDepth,
-      firstEventBlockNumber: rawInitialState.firstEventBlockNumber->
-      timestampCaughtUpToHeadOrEndblock: rawInitialState.timestampCaughtUpToHeadOrEndblock->
+      firstEventBlockNumber: rawInitialState.firstEventBlockNumber->Null.toOption,
+      timestampCaughtUpToHeadOrEndblock: rawInitialState.timestampCaughtUpToHeadOrEndblock->Null.toOption,
       numEventsProcessed: rawInitialState.numEventsProcessed,
       progressBlockNumber: rawInitialState.progressBlockNumber,
-
+      indexingAddresses: rawInitialState.indexingAddresses,
       sourceBlockNumber: rawInitialState.sourceBlockNumber,
     })
   }),
@@ -1500,16 +1507,21 @@ let make = (
     ->Postgres.unsafe(InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema))
     ->(
       Utils.magic: promise<array<unknown>> => promise<
-        array<{
+        array<{
+          "id": string,
+          "chain_id": int,
+          "block_number": int,
+          "block_hash": string,
+        }>,
       >
     ),
   ))
 
-  let checkpointId = (checkpointIdResult->Belt.Array.getUnsafe(0))["id"]->BigInt.
+  let checkpointId = (checkpointIdResult->Belt.Array.getUnsafe(0))["id"]->BigInt.fromStringOrThrow
 
   // Convert string checkpoint IDs from DB to bigint
   let reorgCheckpoints = Belt.Array.map(reorgCheckpoints, (raw): Internal.reorgCheckpoint => {
-    checkpointId: raw["id"]->BigInt.
+    checkpointId: raw["id"]->BigInt.fromStringOrThrow,
     chainId: raw["chain_id"],
     blockNumber: raw["block_number"],
     blockHash: raw["block_hash"],
@@ -1532,7 +1544,7 @@ let make = (
 
   let reset = async () => {
     let query = `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;`
-    await sql->Postgres.unsafe(query)->Promise.ignoreValue
+    await sql->Postgres.unsafe(query)->Utils.Promise.ignoreValue
   }
 
   let setChainMeta = chainsData =>
```
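Checkpoint ids now round-trip through `BigInt.fromStringOrThrow`: PostgreSQL `bigint` exceeds JavaScript's safe-integer range, so drivers commonly surface it as a string, and the conversion happens explicitly at the boundary. A one-liner illustration with a hypothetical row:

```ts
// BIGINT arrives as a string to avoid Number precision loss past 2^53.
const raw = { id: "9007199254740993", chain_id: 1 }; // hypothetical row
const checkpointId: bigint = BigInt(raw.id); // throws a SyntaxError on malformed input
```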
```diff
@@ -1602,19 +1614,20 @@ let make = (
       Some(
         sink.writeBatch(~batch, ~updatedEntities)
         ->Promise.thenResolve(_ => {
-          Prometheus.
-          ~
+          Prometheus.StorageWrite.increment(
+            ~storage=sink.name,
            ~timeSeconds=timerRef->Hrtime.timeSince->Hrtime.toSecondsFloat,
          )
          None
        })
        // Otherwise it fails with unhandled exception
-        ->Promise.catchResolve(exn => Some(exn)),
+        ->Utils.Promise.catchResolve(exn => Some(exn)),
      )
    }
    | None => None
    }
 
+    let primaryTimerRef = Hrtime.makeTimer()
    await writeBatch(
      sql,
      ~batch,
@@ -1629,9 +1642,14 @@ let make = (
      ~updatedEntities,
      ~sinkPromise,
    )
+    Prometheus.StorageWrite.increment(
+      ~storage="postgres",
+      ~timeSeconds=primaryTimerRef->Hrtime.timeSince->Hrtime.toSecondsFloat,
+    )
  }
 
  {
+    name: "postgres",
    isInitialized,
    initialize,
    resumeInitialState,
@@ -1648,3 +1666,109 @@ let make = (
   writeBatch: writeBatchMethod,
  }
 }
+
+let makeStorageFromEnv = (
+  ~config: Config.t,
+  ~sql=makeClient(),
+  ~pgSchema=Env.Db.publicSchema,
+  ~isHasuraEnabled=Env.Hasura.enabled,
+) => {
+  make(
+    ~sql,
+    ~pgSchema,
+    ~pgHost=Env.Db.host,
+    ~pgUser=Env.Db.user,
+    ~pgPort=Env.Db.port,
+    ~pgDatabase=Env.Db.database,
+    ~pgPassword=Env.Db.password,
+    ~sink=?{
+      // Internally ClickHouse storage is implemented as a sync of the
+      // Postgres storage. Required env vars are validated here only when
+      // the user opts in via `storage.clickhouse: true` in config.yaml.
+      if config.storage.clickhouse {
+        let host = Env.ClickHouse.host()
+        let username = Env.ClickHouse.username()
+        let password = Env.ClickHouse.password()
+        let missing = []
+        let checkEnv = (opt, name) =>
+          switch opt {
+          | Some(_) => ()
+          | None => missing->Array.push(name)->ignore
+          }
+        host->checkEnv("ENVIO_CLICKHOUSE_HOST")
+        username->checkEnv("ENVIO_CLICKHOUSE_USERNAME")
+        password->checkEnv("ENVIO_CLICKHOUSE_PASSWORD")
+        if missing->Array.length > 0 {
+          JsError.throwWithMessage(
+            `ClickHouse storage is enabled but required env vars are not set: ${missing->Array.joinUnsafe(
+                ", ",
+              )}. Please set them, disable clickhouse in the \`storage\` config, or run \`envio dev\` for a pre-configured local ClickHouse.`,
+          )
+        }
+        Some(
+          Sink.makeClickHouse(
+            ~host=host->Option.getUnsafe,
+            ~database=Env.ClickHouse.database(),
+            ~username=username->Option.getUnsafe,
+            ~password=password->Option.getUnsafe,
+          ),
+        )
+      } else {
+        None
+      }
+    },
+    ~onInitialize=?{
+      if isHasuraEnabled {
+        Some(
+          () => {
+            Hasura.trackDatabase(
+              ~endpoint=Env.Hasura.graphqlEndpoint,
+              ~auth={
+                role: Env.Hasura.role,
+                secret: Env.Hasura.secret,
+              },
+              ~pgSchema,
+              ~userEntities=config.userEntities,
+              ~responseLimit=Env.Hasura.responseLimit,
+              ~schema=Schema.make(config.allEntities->Belt.Array.map(e => e.table)),
+              ~aggregateEntities=Env.Hasura.aggregateEntities,
+            )->Promise.catch(err => {
+              Logging.errorWithExn(err->Utils.prettifyExn, `Error tracking tables`)->Promise.resolve
+            })
+          },
+        )
+      } else {
+        None
+      }
+    },
+    ~onNewTables=?{
+      if isHasuraEnabled {
+        Some(
+          (~tableNames) => {
+            Hasura.trackTables(
+              ~endpoint=Env.Hasura.graphqlEndpoint,
+              ~auth={
+                role: Env.Hasura.role,
+                secret: Env.Hasura.secret,
+              },
+              ~pgSchema,
+              ~tableNames,
+            )->Promise.catch(err => {
+              Logging.errorWithExn(
+                err->Utils.prettifyExn,
+                `Error tracking new tables`,
+              )->Promise.resolve
+            })
+          },
+        )
+      } else {
+        None
+      }
+    },
+    ~isHasuraEnabled,
+  )
+}
+
+let makePersistenceFromConfig = (~config: Config.t, ~storage=makeStorageFromEnv(~config)) => {
+  Persistence.make(~userEntities=config.userEntities, ~allEnums=config.allEnums, ~storage)
+}
```
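The new `makeStorageFromEnv` validates ClickHouse configuration only when the user opts in, and it collects every missing variable before failing once with the complete list, which spares users a fix-one-rerun loop. A minimal TypeScript sketch of that pattern; the helper name is invented:

```ts
// Collect all missing env vars first, then fail once with the complete list.
function requireEnv(names: string[]): Record<string, string> {
  const missing = names.filter(name => !process.env[name]);
  if (missing.length > 0) {
    throw new Error(`Required env vars are not set: ${missing.join(", ")}`);
  }
  return Object.fromEntries(names.map(name => [name, process.env[name] as string]));
}

const clickhouse = requireEnv([
  "ENVIO_CLICKHOUSE_HOST",
  "ENVIO_CLICKHOUSE_USERNAME",
  "ENVIO_CLICKHOUSE_PASSWORD",
]);
```

Validating lazily like this keeps the `ENVIO_CLICKHOUSE_*` variables optional for the default Postgres-only setup.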